# Write the Terraform configuration for the TAP sandbox network layer:
# VPC, 3 public + 6 private subnets, IGW, one NAT gateway, a PostgreSQL
# security group, an RDS subnet group, and a TechDocs S3 bucket.
# The heredoc delimiter is quoted ('EOF') so nothing is expanded by the shell.
cat <<'EOF' > tap.tf
provider "aws" {
region = "ap-northeast-1"
}
variable "project_prefix" {
description = "Prefix for resource names to indicate the project and environment"
type = string
default = "tap-sandbox"
}
resource "aws_vpc" "tap_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "${var.project_prefix}-vpc"
}
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.tap_vpc.id
}
# Public Subnets: one /23 per AZ (1a, 1c, 1d), tagged for external load balancers.
resource "aws_subnet" "public_subnet" {
count = 3
vpc_id = aws_vpc.tap_vpc.id
cidr_block = ["10.0.0.0/23", "10.0.2.0/23", "10.0.4.0/23"][count.index]
availability_zone = element(["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"], count.index % 3)
map_public_ip_on_launch = true
tags = {
Name = "${var.project_prefix}-public-subnet-${count.index + 1}"
"kubernetes.io/role/elb" = "1"
}
}
resource "aws_route_table" "public_rt" {
vpc_id = aws_vpc.tap_vpc.id
}
resource "aws_route" "public_route" {
route_table_id = aws_route_table.public_rt.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
resource "aws_route_table_association" "public_rta" {
count = 3
subnet_id = aws_subnet.public_subnet[count.index].id
route_table_id = aws_route_table.public_rt.id
}
# Private Subnets: indices 0-2 are the /22 "tap" (EKS node) subnets,
# indices 3-5 are the /23 "db" (RDS) subnets; AZs rotate 1a/1c/1d.
resource "aws_subnet" "private_subnet" {
count = 6
vpc_id = aws_vpc.tap_vpc.id
cidr_block = ["10.0.8.0/22", "10.0.12.0/22", "10.0.16.0/22", "10.0.20.0/23", "10.0.22.0/23", "10.0.24.0/23"][count.index]
availability_zone = element(["ap-northeast-1a", "ap-northeast-1c", "ap-northeast-1d"], count.index % 3)
tags = {
Name = "${var.project_prefix}-private-${element(["tap-1", "tap-2", "tap-3", "db-1", "db-2", "db-3"], count.index)}"
}
}
# Public NAT Gateway for Private Subnets
# NOTE(review): a single NAT gateway in public_subnet[0] serves all three AZs —
# cheaper, but a single point of failure and cross-AZ data charges apply.
resource "aws_eip" "tap_nat_eip" {
domain = "vpc"
tags = {
Name = "${var.project_prefix}-tap-nat-eip"
}
}
resource "aws_nat_gateway" "tap_nat" {
allocation_id = aws_eip.tap_nat_eip.id
subnet_id = aws_subnet.public_subnet[0].id
tags = {
Name = "${var.project_prefix}-tap-nat"
}
}
# Route tables for tap-1, tap-2, tap-3 subnets to use the (public) NAT Gateway above.
# (The original comment said "Private NAT Gateway", but aws_route.tap_private_route
# targets aws_nat_gateway.tap_nat, which sits in a public subnet.)
resource "aws_route_table" "private_tap_rt" {
count = 3 # For tap-1, tap-2, tap-3
vpc_id = aws_vpc.tap_vpc.id
tags = {
Name = "${var.project_prefix}-private-tap-${count.index}-rt"
}
}
resource "aws_route" "tap_private_route" {
count = 3 # For tap-1, tap-2, tap-3
route_table_id = element(aws_route_table.private_tap_rt.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.tap_nat.id
}
resource "aws_route_table_association" "tap_rta" {
count = 3 # For tap-1, tap-2, tap-3
subnet_id = element(aws_subnet.private_subnet.*.id, count.index) # Indices 0-2 are the tap-1, tap-2, tap-3 subnets
route_table_id = element(aws_route_table.private_tap_rt.*.id, count.index)
}
# Allow PostgreSQL (5432) only from the three tap node subnets.
resource "aws_security_group" "allow_postgres" {
name = "${var.project_prefix}-allow-postgres"
description = "Allow PostgreSQL traffic from tap-1, tap-2, tap-3 subnets on port 5432"
vpc_id = aws_vpc.tap_vpc.id
ingress {
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = [aws_subnet.private_subnet[0].cidr_block, aws_subnet.private_subnet[1].cidr_block, aws_subnet.private_subnet[2].cidr_block]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_prefix}-allow-postgres"
}
}
# RDS subnet group over the db-1, db-2, db-3 subnets (indices 3-5).
resource "aws_db_subnet_group" "db_subnet_group" {
name = "${var.project_prefix}-db-subnet-group"
description = "RDS DB Subnet Group for db-1, db-2, db-3 subnets"
subnet_ids = [aws_subnet.private_subnet[3].id, aws_subnet.private_subnet[4].id, aws_subnet.private_subnet[5].id]
tags = {
Name = "${var.project_prefix}-db-subnet-group"
}
}
# Versioned S3 bucket for TAP GUI TechDocs.
resource "aws_s3_bucket" "techdocs_bucket" {
bucket = "${var.project_prefix}-techdocs"
tags = {
Name = "${var.project_prefix}-techdocs"
}
}
resource "aws_s3_bucket_versioning" "techdocs_bucket_versioning" {
bucket = aws_s3_bucket.techdocs_bucket.id
versioning_configuration {
status = "Enabled"
}
}
EOF
terraform init
terraform plan -out plan
terraform apply plan
# Export the created resource IDs for the rest of the setup.
# NOTE: this heredoc delimiter is intentionally UNQUOTED, so every $(...) runs
# NOW (against ./terraform.tfstate in this directory) and ../vars.sh receives
# the already-resolved literal values.
# NOTE(review): parsing terraform.tfstate with jq depends on the internal state
# format; declaring `output` blocks and reading `terraform output -json` would
# be more stable — TODO confirm before changing.
cat <<EOF > ../vars.sh
export VPC_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "tap_vpc").instances[0].attributes.id')
export PUBLIC_SUBNET_1_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[0].attributes.id')
export PUBLIC_SUBNET_1_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[0].attributes.availability_zone')
export PUBLIC_SUBNET_2_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[1].attributes.id')
export PUBLIC_SUBNET_2_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[1].attributes.availability_zone')
export PUBLIC_SUBNET_3_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[2].attributes.id')
export PUBLIC_SUBNET_3_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "public_subnet").instances[2].attributes.availability_zone')
export PRIVATE_SUBNET_1_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[0].attributes.id')
export PRIVATE_SUBNET_1_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[0].attributes.availability_zone')
export PRIVATE_SUBNET_2_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[1].attributes.id')
export PRIVATE_SUBNET_2_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[1].attributes.availability_zone')
export PRIVATE_SUBNET_3_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[2].attributes.id')
export PRIVATE_SUBNET_3_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[2].attributes.availability_zone')
export PRIVATE_SUBNET_4_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[3].attributes.id')
export PRIVATE_SUBNET_4_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[3].attributes.availability_zone')
export PRIVATE_SUBNET_5_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[4].attributes.id')
export PRIVATE_SUBNET_5_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[4].attributes.availability_zone')
export PRIVATE_SUBNET_6_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[5].attributes.id')
export PRIVATE_SUBNET_6_AZ=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "private_subnet").instances[5].attributes.availability_zone')
export DB_SG_ID=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "allow_postgres").instances[0].attributes.id')
export DB_SUBNET_GROUP_NAME=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "db_subnet_group").instances[0].attributes.name')
export TECHDOCS_BUCKET_NAME=$(cat terraform.tfstate | jq -r '.resources[] | select(.name == "techdocs_bucket").instances[0].attributes.bucket')
EOF
cd ..
source vars.sh
# Generate the eksctl ClusterConfig and create the cluster.
# Fix: the YAML in this heredoc had lost all indentation (invalid YAML);
# restored the conventional eksctl layout. The heredoc is unquoted so the
# ${VPC_ID}/${PRIVATE_SUBNET_*} values from vars.sh are substituted at write time.
# Only the three tap-* private subnets (1-3) are given to EKS; the db subnets
# stay out of the cluster.
cat <<EOF > eks-cluster-config.yaml
---
kind: ClusterConfig
apiVersion: eksctl.io/v1alpha5
metadata:
  name: tap-sandbox
  region: ap-northeast-1
  version: "1.28"
managedNodeGroups:
- name: tap-sandbox-ng-1
  minSize: 3
  maxSize: 3
  desiredCapacity: 3
  volumeSize: 200
  # NOTE(review): 110 pods/node usually requires prefix delegation or custom
  # networking on c5.xlarge — confirm the VPC CNI is configured accordingly.
  maxPodsPerNode: 110
  instanceType: c5.xlarge
  privateNetworking: true
  spot: false
  ssh:
    allow: true
    publicKeyPath: ~/.ssh/id_rsa.pub
addons:
- name: aws-ebs-csi-driver
  wellKnownPolicies:
    ebsCSIController: true
iam:
  withOIDC: true
vpc:
  id: ${VPC_ID}
  subnets:
    private:
      ${PRIVATE_SUBNET_1_AZ}:
        id: ${PRIVATE_SUBNET_1_ID}
      ${PRIVATE_SUBNET_2_AZ}:
        id: ${PRIVATE_SUBNET_2_ID}
      ${PRIVATE_SUBNET_3_AZ}:
        id: ${PRIVATE_SUBNET_3_ID}
---
EOF
eksctl create cluster -f eks-cluster-config.yaml
$ kubectl get node -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ip-10-0-12-228.ap-northeast-1.compute.internal Ready <none> 4m58s v1.28.5-eks-5e0fdde 10.0.12.228 <none> Amazon Linux 2 5.10.213-201.855.amzn2.x86_64 containerd://1.7.11
ip-10-0-19-37.ap-northeast-1.compute.internal Ready <none> 5m31s v1.28.5-eks-5e0fdde 10.0.19.37 <none> Amazon Linux 2 5.10.213-201.855.amzn2.x86_64 containerd://1.7.11
ip-10-0-8-251.ap-northeast-1.compute.internal Ready <none> 5m37s v1.28.5-eks-5e0fdde 10.0.8.251 <none> Amazon Linux 2 5.10.213-201.855.amzn2.x86_64 containerd://1.7.11
$ kubectl get nodes -o=custom-columns='NAME:.metadata.name,INSTANCE-TYPE:.metadata.labels.beta\.kubernetes\.io/instance-type,CAPACITY-TYPE:.metadata.labels.eks\.amazonaws\.com/capacityType,ZONE:.metadata.labels.failure-domain\.beta\.kubernetes\.io/zone'
NAME INSTANCE-TYPE CAPACITY-TYPE ZONE
ip-10-0-12-228.ap-northeast-1.compute.internal c5.xlarge ON_DEMAND ap-northeast-1c
ip-10-0-19-37.ap-northeast-1.compute.internal c5.xlarge ON_DEMAND ap-northeast-1d
ip-10-0-8-251.ap-northeast-1.compute.internal c5.xlarge ON_DEMAND ap-northeast-1a
# Resolve account/region/cluster identifiers used throughout the rest of the setup.
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
export AWS_REGION=ap-northeast-1
export EKS_CLUSTER_NAME=tap-sandbox
# Create the ECR repositories TAP needs (images, build service, full deps,
# local source proxy, cluster essentials).
for repo_name in tap-images tap-build-service full-deps-package-repo tap-lsp tanzu-cluster-essentials; do
  aws ecr create-repository --repository-name "${repo_name}" --region $AWS_REGION
done
# Retrieve the cluster's OIDC issuer (without the https:// scheme) for use in
# the IRSA trust policies below.
export OIDCPROVIDER=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --region $AWS_REGION --output json | jq -r '.cluster.identity.oidc.issuer' | sed 's|https://||')
# IRSA trust policy for Build Service: lets the kpack controller and the
# dependency-updater service accounts assume the role via the cluster OIDC
# provider, with the audience pinned to sts.amazonaws.com.
cat << EOF > build-service-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
},
"StringLike": {
"${OIDCPROVIDER}:sub": [
"system:serviceaccount:kpack:controller",
"system:serviceaccount:build-service:dependency-updater-controller-serviceaccount"
]
}
}
}
]
}
EOF
# Build Service permissions: registry-level ECR actions on "*", plus full
# push/pull management scoped to the three build-related repositories.
cat << EOF > build-service-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecr:DescribeRegistry",
"ecr:GetAuthorizationToken",
"ecr:GetRegistryPolicy",
"ecr:PutRegistryPolicy",
"ecr:PutReplicationConfiguration",
"ecr:DeleteRegistryPolicy"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "TAPEcrBuildServiceGlobal"
},
{
"Action": [
"ecr:DescribeImages",
"ecr:ListImages",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:BatchGetRepositoryScanningConfiguration",
"ecr:DescribeImageReplicationStatus",
"ecr:DescribeImageScanFindings",
"ecr:DescribeRepositories",
"ecr:GetDownloadUrlForLayer",
"ecr:GetLifecyclePolicy",
"ecr:GetLifecyclePolicyPreview",
"ecr:GetRegistryScanningConfiguration",
"ecr:GetRepositoryPolicy",
"ecr:ListTagsForResource",
"ecr:TagResource",
"ecr:UntagResource",
"ecr:BatchDeleteImage",
"ecr:BatchImportUpstreamImage",
"ecr:CompleteLayerUpload",
"ecr:CreatePullThroughCacheRule",
"ecr:CreateRepository",
"ecr:DeleteLifecyclePolicy",
"ecr:DeletePullThroughCacheRule",
"ecr:DeleteRepository",
"ecr:InitiateLayerUpload",
"ecr:PutImage",
"ecr:PutImageScanningConfiguration",
"ecr:PutImageTagMutability",
"ecr:PutLifecyclePolicy",
"ecr:PutRegistryScanningConfiguration",
"ecr:ReplicateImage",
"ecr:StartImageScan",
"ecr:StartLifecyclePolicyPreview",
"ecr:UploadLayerPart",
"ecr:DeleteRepositoryPolicy",
"ecr:SetRepositoryPolicy"
],
"Resource": [
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/full-deps-package-repo",
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/tap-build-service",
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/tap-images"
],
"Effect": "Allow",
"Sid": "TAPEcrBuildServiceScoped"
}
]
}
EOF
# Workload permissions: same registry-level ECR actions, scoped repository
# actions on full-deps-package-repo and tanzu-application-platform/* — the
# per-workload repositories created by the create-ecr-repo Tekton task later
# in this script.
cat << EOF > workload-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecr:DescribeRegistry",
"ecr:GetAuthorizationToken",
"ecr:GetRegistryPolicy",
"ecr:PutRegistryPolicy",
"ecr:PutReplicationConfiguration",
"ecr:DeleteRegistryPolicy"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "TAPEcrWorkloadGlobal"
},
{
"Action": [
"ecr:DescribeImages",
"ecr:ListImages",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:BatchGetRepositoryScanningConfiguration",
"ecr:DescribeImageReplicationStatus",
"ecr:DescribeImageScanFindings",
"ecr:DescribeRepositories",
"ecr:GetDownloadUrlForLayer",
"ecr:GetLifecyclePolicy",
"ecr:GetLifecyclePolicyPreview",
"ecr:GetRegistryScanningConfiguration",
"ecr:GetRepositoryPolicy",
"ecr:ListTagsForResource",
"ecr:TagResource",
"ecr:UntagResource",
"ecr:BatchDeleteImage",
"ecr:BatchImportUpstreamImage",
"ecr:CompleteLayerUpload",
"ecr:CreatePullThroughCacheRule",
"ecr:CreateRepository",
"ecr:DeleteLifecyclePolicy",
"ecr:DeletePullThroughCacheRule",
"ecr:DeleteRepository",
"ecr:InitiateLayerUpload",
"ecr:PutImage",
"ecr:PutImageScanningConfiguration",
"ecr:PutImageTagMutability",
"ecr:PutLifecyclePolicy",
"ecr:PutRegistryScanningConfiguration",
"ecr:ReplicateImage",
"ecr:StartImageScan",
"ecr:StartLifecyclePolicyPreview",
"ecr:UploadLayerPart",
"ecr:DeleteRepositoryPolicy",
"ecr:SetRepositoryPolicy"
],
"Resource": [
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/full-deps-package-repo",
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/tanzu-application-platform/*"
],
"Effect": "Allow",
"Sid": "TAPEcrWorkloadScoped"
}
]
}
EOF
# IRSA trust policy for workloads: any namespace's "default" service account
# (wildcard sub) may assume the role.
# NOTE(review): here the aud condition sits under StringLike, unlike the other
# trust policies in this file which use StringEquals — it works, but confirm
# the inconsistency is intentional.
cat << EOF > workload-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringLike": {
"${OIDCPROVIDER}:sub": "system:serviceaccount:*:default",
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
}
}
}
]
}
EOF
# IRSA trust policy for the Local Source Proxy: only the proxy-manager
# service account in tap-local-source-system may assume the role.
cat << EOF > local-source-proxy-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
},
"StringLike": {
"${OIDCPROVIDER}:sub": [
"system:serviceaccount:tap-local-source-system:proxy-manager"
]
}
}
}
]
}
EOF
# Local Source Proxy permissions: auth token globally, push/pull scoped to
# the tap-lsp repository only.
cat << EOF > local-source-proxy-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecr:GetAuthorizationToken"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "TAPLSPGlobal"
},
{
"Effect": "Allow",
"Action": [
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:DescribeImages",
"ecr:BatchGetImage",
"ecr:GetLifecyclePolicy",
"ecr:GetLifecyclePolicyPreview",
"ecr:ListTagsForResource",
"ecr:DescribeImageScanFindings",
"ecr:InitiateLayerUpload",
"ecr:UploadLayerPart",
"ecr:CompleteLayerUpload",
"ecr:PutImage"
],
"Resource": [
"arn:aws:ecr:${AWS_REGION}:${AWS_ACCOUNT_ID}:repository/tap-lsp"
],
"Sid": "TAPLSPScoped"
}
]
}
EOF
# IRSA trust policy for cert-manager: the cert-manager service account in the
# cert-manager namespace may assume the Route53 role (for DNS-01 challenges).
cat << EOF > cert-manager-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
},
"StringLike": {
"${OIDCPROVIDER}:sub": [
"system:serviceaccount:cert-manager:cert-manager"
]
}
}
}
]
}
EOF
# cert-manager Route53 permissions for ACME DNS-01 challenges: read change
# status, edit record sets in any hosted zone, list zones by name.
cat <<EOF > cert-manager-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "route53:GetChange",
"Resource": "arn:aws:route53:::change/*"
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets"
],
"Resource": "arn:aws:route53:::hostedzone/*"
},
{
"Effect": "Allow",
"Action": "route53:ListHostedZonesByName",
"Resource": "*"
}
]
}
EOF
# IRSA trust policy for TechDocs: the tap-gui service account in the tap-gui
# namespace may assume the S3 read role.
cat << EOF > tdp-techdocs-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
},
"StringLike": {
"${OIDCPROVIDER}:sub": [
"system:serviceaccount:tap-gui:tap-gui"
]
}
}
}
]
}
EOF
# TechDocs permissions: read-only access (list + get) on the TechDocs bucket;
# publishing is done elsewhere, so no write actions are granted here.
cat <<EOF > tdp-techdocs-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ReadTechDocs",
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::${TECHDOCS_BUCKET_NAME}",
"arn:aws:s3:::${TECHDOCS_BUCKET_NAME}/*"
]
}
]
}
EOF
# IRSA trust policy for AWS services via Crossplane: any provider-aws-*
# service account in crossplane-system may assume the role.
cat << EOF > aws-services-trust-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDCPROVIDER}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCPROVIDER}:aud": "sts.amazonaws.com"
},
"StringLike": {
"${OIDCPROVIDER}:sub": [
"system:serviceaccount:crossplane-system:provider-aws-*"
]
}
}
}
]
}
EOF
# Crossplane AWS provider permissions: RDS instance lifecycle management plus
# security-group lookup (all on Resource "*" — acceptable for a sandbox).
cat <<EOF > aws-services-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"rds:CreateDBInstance",
"rds:DeleteDBInstance",
"rds:DescribeDBInstances",
"rds:StopDBInstance",
"rds:StartDBInstance",
"rds:ModifyDBInstance",
"rds:AddTagsToResource",
"rds:DescribeDBSubnetGroups",
"rds:ListTagsForResource",
"ec2:DescribeSecurityGroups"
],
"Resource": "*"
}
]
}
EOF
# Create each IRSA role from its trust policy and attach the matching inline
# permission policy. One helper, six role/policy pairs — identical commands to
# running create-role + put-role-policy by hand for each pair.
create_role_with_policy() {
  local role_name=$1 policy_name=$2 trust_doc=$3 policy_doc=$4
  aws iam create-role --role-name "${role_name}" --assume-role-policy-document "file://${trust_doc}"
  aws iam put-role-policy --role-name "${role_name}" --policy-name "${policy_name}" --policy-document "file://${policy_doc}"
}
create_role_with_policy tap-build-service-${EKS_CLUSTER_NAME} tapBuildServicePolicy-${EKS_CLUSTER_NAME} build-service-trust-policy.json build-service-policy.json
create_role_with_policy tap-workload-${EKS_CLUSTER_NAME} tapWorkload-${EKS_CLUSTER_NAME} workload-trust-policy.json workload-policy.json
create_role_with_policy tap-local-source-proxy-${EKS_CLUSTER_NAME} tapLocalSourcePolicy-${EKS_CLUSTER_NAME} local-source-proxy-trust-policy.json local-source-proxy-policy.json
create_role_with_policy tap-cert-manager-${EKS_CLUSTER_NAME} tapCertManagerPolicy-${EKS_CLUSTER_NAME} cert-manager-trust-policy.json cert-manager-policy.json
create_role_with_policy tap-tdp-techdocs-${EKS_CLUSTER_NAME} tapTdpTechdocsPolicy-${EKS_CLUSTER_NAME} tdp-techdocs-trust-policy.json tdp-techdocs-policy.json
create_role_with_policy tap-aws-services-${EKS_CLUSTER_NAME} tapAwsServicesPolicy-${EKS_CLUSTER_NAME} aws-services-trust-policy.json aws-services-policy.json
# Tanzu Network credentials — placeholders, set real values before running.
export TANZUNET_USERNAME=...
export TANZUNET_PASSWORD=...
export TANZU_API_TOKEN=...
pivnet login --api-token=${TANZU_API_TOKEN}
# Relocate the cluster-essentials bundle from the Tanzu registry into ECR.
ECR_REPO=$(aws ecr describe-repositories --region ap-northeast-1 --query 'repositories[?repositoryName==`tanzu-cluster-essentials`].repositoryUri' --output text)
# NOTE(review): ECR_REPO is a full repository URI (host/path); docker login
# conventionally targets only the registry hostname (${ECR_REPO%%/*}) — confirm
# this form actually stores usable credentials.
aws ecr get-login-password --region ${AWS_REGION} | docker login ${ECR_REPO} -u AWS --password-stdin
IMGPKG_REGISTRY_HOSTNAME=registry.tanzu.vmware.com \
IMGPKG_REGISTRY_USERNAME=$TANZUNET_USERNAME \
IMGPKG_REGISTRY_PASSWORD=$TANZUNET_PASSWORD \
imgpkg copy \
-b registry.tanzu.vmware.com/tanzu-cluster-essentials/cluster-essentials-bundle:1.8.1 \
--to-repo ${ECR_REPO} \
--include-non-distributable-layers
# Download and unpack the cluster-essentials installer for Linux.
pivnet download-product-files --product-slug='tanzu-cluster-essentials' --release-version='1.8.1' --glob='tanzu-cluster-essentials-linux-amd64-*'
mkdir -p tanzu-cluster-essentials
tar -xvf tanzu-cluster-essentials-*.tgz -C tanzu-cluster-essentials
cd tanzu-cluster-essentials
# Install cluster essentials from the relocated ECR bundle.
# INSTALL_REGISTRY_* are deliberately dummy values: install.sh requires them,
# presumably because the nodes authenticate to ECR via their IAM role instead
# of a registry secret — TODO confirm.
INSTALL_BUNDLE=${ECR_REPO}:1.8.1 \
INSTALL_REGISTRY_HOSTNAME=dummy.example.com \
INSTALL_REGISTRY_USERNAME=dummy \
INSTALL_REGISTRY_PASSWORD=dummy \
./install.sh --yes
cd ..
# Relocate the TAP package images (1.9.0) from the Tanzu registry into ECR.
ECR_REPO=$(aws ecr describe-repositories --region ap-northeast-1 --query 'repositories[?repositoryName==`tap-images`].repositoryUri' --output text)
aws ecr get-login-password --region ${AWS_REGION} | docker login ${ECR_REPO} -u AWS --password-stdin
IMGPKG_REGISTRY_HOSTNAME=registry.tanzu.vmware.com \
IMGPKG_REGISTRY_USERNAME=$TANZUNET_USERNAME \
IMGPKG_REGISTRY_PASSWORD=$TANZUNET_PASSWORD \
imgpkg copy \
--concurrency 16 \
-b registry.tanzu.vmware.com/tanzu-application-platform/tap-packages:1.9.0 \
--to-repo ${ECR_REPO} \
--include-non-distributable-layers
# Relocate the full dependencies package repo (buildpack dependencies).
ECR_REPO=$(aws ecr describe-repositories --region ap-northeast-1 --query 'repositories[?repositoryName==`full-deps-package-repo`].repositoryUri' --output text)
aws ecr get-login-password --region ap-northeast-1 | docker login ${ECR_REPO} -u AWS --password-stdin
IMGPKG_REGISTRY_HOSTNAME=registry.tanzu.vmware.com \
IMGPKG_REGISTRY_USERNAME=$TANZUNET_USERNAME \
IMGPKG_REGISTRY_PASSWORD=$TANZUNET_PASSWORD \
imgpkg copy \
-b registry.tanzu.vmware.com/tanzu-application-platform/full-deps-package-repo:1.9.0 \
--to-repo ${ECR_REPO} \
--include-non-distributable-layers
# Register the relocated TAP package repository with kapp-controller.
# NOTE(review): `kubectl create ns` fails if the namespace already exists;
# for re-runs consider `kubectl create ns tap-install --dry-run=client -o yaml | kubectl apply -f -`.
kubectl create ns tap-install
ECR_REPO=$(aws ecr describe-repositories --region ap-northeast-1 --query 'repositories[?repositoryName==`tap-images`].repositoryUri' --output text)
aws ecr get-login-password --region ${AWS_REGION} | docker login ${ECR_REPO} -u AWS --password-stdin
tanzu package repository add tanzu-tap-repository \
--url ${ECR_REPO}:1.9.0 \
--namespace tap-install
# Overlay secret for ootb-templates: adds a Tekton Task that pre-creates the
# per-workload ECR repositories, and a ClusterSourceTemplate that runs it.
# Fix: the YAML in this heredoc had lost all indentation (invalid YAML);
# restored the nesting (Secret > stringData literal block > multiple documents,
# including a bash script and an embedded ytt template). Delimiter is quoted
# ('EOF') so nothing is shell-expanded; $ references belong to Tekton/bash/ytt.
cat <<'EOF' > ootb-templates-ecr-repo-template.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: ootb-templates-ecr-repo-template
  namespace: tap-install
type: Opaque
stringData:
  ecr-repo-template.yaml: |
    apiVersion: v1
    kind: Namespace
    metadata:
      name: tekton-tasks
    ---
    apiVersion: tekton.dev/v1beta1
    kind: Task
    metadata:
      name: create-ecr-repo
      namespace: tekton-tasks
    spec:
      params:
      - name: namespace
        type: string
      - name: workload-name
        type: string
      steps:
      - name: check
        image: bitnami/aws-cli
        script: |
          #!/bin/bash
          # Create the repository only when describe-repositories reports it missing.
          # NOTE(review): AWS CLI v2 returns 254 for client errors such as
          # RepositoryNotFoundException, but other client errors return 254 too —
          # confirm this is an acceptable trigger for create-repository.
          check_and_create_repository() {
            local repository_name=$1
            aws ecr describe-repositories --repository-names "$repository_name" > /dev/null 2>&1
            if [ $? -eq 254 ]; then
              echo "Repository '$repository_name' does not exist. Creating repository..."
              aws ecr create-repository --repository-name "$repository_name"
            else
              echo "Repository '$repository_name' already exists."
            fi
          }
          WORKLOAD_NAME="$(params.workload-name)"
          NAMESPACE="$(params.namespace)"
          check_and_create_repository tanzu-application-platform/${WORKLOAD_NAME}-${NAMESPACE}
          check_and_create_repository tanzu-application-platform/${WORKLOAD_NAME}-${NAMESPACE}-bundle
          check_and_create_repository tanzu-application-platform/${WORKLOAD_NAME}-${NAMESPACE}-scan-results
        securityContext:
          runAsUser: 0
    ---
    #@ load("@ytt:data", "data")
    apiVersion: carto.run/v1alpha1
    kind: ClusterSourceTemplate
    metadata:
      name: ecr-repo-template
    spec:
      params:
      - name: serviceAccount
        default: default
      urlPath: .spec.params[?(@.name=="source-url")].value
      revisionPath: .spec.params[?(@.name=="source-revision")].value
      lifecycle: tekton
      #@ label_exclusions = "[\"" + "\", \"".join(data.values.label_propagation_exclusions) + "\"]"
      #@yaml/text-templated-strings
      ytt: |
        #@ load("@ytt:data", "data")
        #@ def merge_labels(fixed_values):
        #@   labels = {}
        #@   if hasattr(data.values.workload.metadata, "labels"):
        #@     exclusions = (@= label_exclusions @)
        #@     for k,v in dict(data.values.workload.metadata.labels).items():
        #@       if k not in exclusions:
        #@         labels[k] = v
        #@       end
        #@     end
        #@   end
        #@   labels.update(fixed_values)
        #@   return labels
        #@ end
        ---
        apiVersion: tekton.dev/v1beta1
        kind: TaskRun
        metadata:
          generateName: #@ data.values.workload.metadata.name + "-ecr-repo-"
          labels: #@ merge_labels({ "app.kubernetes.io/component": "ecr-repo" })
        spec:
          serviceAccountName: #@ data.values.params.serviceAccount
          taskRef:
            resolver: cluster
            params:
            - name: kind
              value: task
            - name: namespace
              value: tekton-tasks
            - name: name
              value: create-ecr-repo
          params:
          - name: namespace
            value: #@ data.values.workload.metadata.namespace
          - name: workload-name
            value: #@ data.values.workload.metadata.name
          - name: source-url
            value: #@ data.values.source.url
          - name: source-revision
            value: #@ data.values.source.revision
---
EOF
kubectl apply -f ootb-templates-ecr-repo-template.yaml
# Overlay secret for the source-* supply chains: inserts the ecr-repo resource
# before source-tester and rewires source-tester to consume it.
# Fix: restored the indentation that had been stripped from the YAML/ytt
# content (flattened YAML is invalid). Quoted 'EOF' — no shell expansion.
cat <<'EOF' > ootb-supply-chain-add-ecr-repo-template.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: ootb-supply-chain-add-ecr-repo-template
  namespace: tap-install
type: Opaque
stringData:
  add-ecr-repo-template.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.and_op(overlay.subset({"kind": "ClusterSupplyChain"}), lambda i,left,right: left["metadata"]["name"].startswith("source-")), expects="1+"
    ---
    spec:
      resources:
      #@overlay/match by=overlay.subset({"name": "source-tester"})
      #@overlay/insert before=True
      - name: ecr-repo
        templateRef:
          kind: ClusterSourceTemplate
          name: ecr-repo-template
        sources:
        - resource: source-provider
          name: source
      #@overlay/match by="name"
      - name: source-tester
        sources:
        #@overlay/match by="name"
        - name: source
          resource: ecr-repo
---
EOF
kubectl apply -f ootb-supply-chain-add-ecr-repo-template.yaml
# Overlay secret for the kaniko-build Task: replaces the
# copy-docker-config-and-certs step's script (copies docker config if present,
# then CA certs, appending any ca_cert_data param).
# Fix: restored the indentation that had been stripped from the YAML/ytt
# content (flattened YAML is invalid). Quoted 'EOF' — no shell expansion;
# $(params.ca_cert_data) is Tekton parameter syntax, not shell.
cat <<'EOF' > ootb-templates-overlay-kaniko-build.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: ootb-templates-overlay-kaniko-build
  namespace: tap-install
type: Opaque
stringData:
  overlay-kaniko-build.yml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"kind":"Task", "metadata": {"name": "kaniko-build"}})
    ---
    spec:
      steps:
      #@overlay/match by="name"
      - name: copy-docker-config-and-certs
        script: |
          #!/usr/bin/env bash
          set -o errexit
          set -o xtrace
          if [ -f "/tekton/creds/.docker/config.json" ]; then
            cp /tekton/creds/.docker/config.json /docker-config/
          fi
          cp /etc/ssl/certs/ca-certificates.crt /certs/ca-certificates.crt
          if [[ ! -z "$(params.ca_cert_data)" ]]; then
            echo "$(params.ca_cert_data)" >> /certs/ca-certificates.crt
          fi
---
EOF
kubectl apply -f ootb-templates-overlay-kaniko-build.yaml
# Overlay secret for cert-manager: annotates its ServiceAccount with the IRSA
# role ARN and sets fsGroup on the Deployment (presumably so the projected
# web-identity token is readable by the non-root process — confirm).
# Fix: restored the indentation stripped from the YAML content. Unquoted EOF:
# ${AWS_ACCOUNT_ID}/${EKS_CLUSTER_NAME} are expanded at write time, as intended.
cat <<EOF > cert-manager-irsa.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: cert-manager-irsa
  namespace: tap-install
type: Opaque
stringData:
  cert-manager-irsa.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"kind": "ServiceAccount", "metadata": {"name": "cert-manager"}})
    ---
    metadata:
      #@overlay/match missing_ok=True
      annotations:
        #@overlay/match missing_ok=True
        eks.amazonaws.com/role-arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-cert-manager-${EKS_CLUSTER_NAME}
    #@overlay/match by=overlay.subset({"kind": "Deployment", "metadata": {"name": "cert-manager"}})
    ---
    spec:
      template:
        spec:
          #@overlay/match missing_ok=True
          securityContext:
            #@overlay/match missing_ok=True
            fsGroup: 1001
---
EOF
kubectl apply -f cert-manager-irsa.yaml
# example DNS zone — replace with your own delegated Route53 zone.
DNS_ZONE=aws.maki.lol
# Secret carrying a Let's Encrypt ClusterIssuer that solves DNS-01 challenges
# via Route53 for ${DNS_ZONE}.
# Fix: restored the indentation stripped from the YAML content. Unquoted EOF:
# ${DNS_ZONE} is expanded at write time, as intended.
# NOTE(review): replace the placeholder ACME registration email before use.
cat <<EOF > cert-manager-cluster-issuer.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: cert-manager-cluster-issuer
  namespace: tap-install
type: Opaque
stringData:
  cert-cluster-issuer.yaml: |
    apiVersion: cert-manager.io/v1
    kind: ClusterIssuer
    metadata:
      name: letsencrypt
    spec:
      acme:
        server: https://acme-v02.api.letsencrypt.org/directory
        email: user@yourdomain.com
        privateKeySecretRef:
          name: letsencrypt
        solvers:
        - selector:
            dnsZones:
            - ${DNS_ZONE}
          dns01:
            route53:
              region: ap-northeast-1
---
EOF
kubectl apply -f cert-manager-cluster-issuer.yaml
# Apply the tap-gui-db manifest from the gist (presumably the PostgreSQL
# backing database referenced by the tap-gui-db overlay secret in tap-values —
# see the gist for its contents).
kubectl apply -f https://gist.github.com/making/796cc4d9e151292553bf52c8f79d0f18/raw/080a1135182c0d0f8d81bfe08feff2a23fdafce2/tap-gui-db.yaml
# Overlay secret for tap-gui: annotates its ServiceAccount with the TechDocs
# IRSA role so the GUI can read the S3 bucket.
# Fix: restored the indentation stripped from the YAML content. Unquoted EOF:
# ${AWS_ACCOUNT_ID}/${EKS_CLUSTER_NAME} are expanded at write time.
cat <<EOF > tap-gui-irsa.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: tap-gui-irsa
  namespace: tap-install
type: Opaque
stringData:
  tap-gui-irsa.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"kind": "ServiceAccount", "metadata": {"name": "tap-gui"}})
    ---
    metadata:
      #@overlay/match missing_ok=True
      annotations:
        #@overlay/match missing_ok=True
        eks.amazonaws.com/role-arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-tdp-techdocs-${EKS_CLUSTER_NAME}
---
EOF
kubectl apply -f tap-gui-irsa.yaml
# Overlay secret for CNRs: enables autocreate-cluster-domain-claims in the
# config-network ConfigMap.
# Fix: restored the indentation stripped from the YAML content.
cat <<EOF > cnrs-autocreate-cluster-domain-claims.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: cnrs-autocreate-cluster-domain-claims
  namespace: tap-install
type: Opaque
stringData:
  cnrs-autocreate-cluster-domain-claims.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"metadata":{"name":"config-network"}, "kind": "ConfigMap"})
    ---
    data:
      #@overlay/match missing_ok=True
      autocreate-cluster-domain-claims: "true"
---
EOF
kubectl apply -f cnrs-autocreate-cluster-domain-claims.yaml
# Quick GC setup — aggressively garbage-collect old Knative revisions; see
# https://knative.dev/docs/serving/revisions/revision-admin-config-options/#garbage-collection
# Overlay secret for CNRs: configures the config-gc ConfigMap so no
# non-active revisions are retained (all four knobs set to delete immediately).
# Fix: restored the indentation stripped from the YAML content.
cat <<EOF > cnrs-gc.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: cnrs-gc
  namespace: tap-install
type: Opaque
stringData:
  cnrs-gc.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"metadata":{"name":"config-gc"}, "kind": "ConfigMap"})
    ---
    data:
      #@overlay/match missing_ok=True
      min-non-active-revisions: "0"
      #@overlay/match missing_ok=True
      max-non-active-revisions: "0"
      #@overlay/match missing_ok=True
      retain-since-create-time: "disabled"
      #@overlay/match missing_ok=True
      retain-since-last-active-time: "disabled"
---
EOF
kubectl apply -f cnrs-gc.yaml
cat << EOF > tap-values.yaml
---
shared:
ingress_domain: ${DNS_ZONE}
ingress_issuer: letsencrypt
# tap-values for a "full" profile TAP install on EKS, using ECR and IRSA roles.
# NOTE(review): the transcript lost its leading indentation; the nesting below is
# reconstructed from the standard TAP 1.9 values schema — verify against the source.
ceip_policy_disclosed: true
profile: full
supply_chain: testing_scanning
ootb_supply_chain_testing_scanning:
  registry:
    # Registry/repository where the supply chain pushes workload images
    server: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
    repository: tanzu-application-platform
contour:
  infrastructure_provider: aws
  envoy:
    workload:
      type: Deployment
      replicas: 2
    service:
      type: LoadBalancer
      aws:
        LBType: nlb   # expose Envoy through an AWS NLB
  contour:
    replicas: 1
    configFileContents:
      accesslog-format: json
buildservice:
  kp_default_repository: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/tap-build-service
  # IRSA role that lets Build Service push to ECR without registry credentials
  kp_default_repository_aws_iam_role_arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-build-service-${EKS_CLUSTER_NAME}
  # Full dependencies are installed separately from the full-deps package repository
  exclude_dependencies: true
local_source_proxy:
  repository: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/tap-lsp
  push_secret:
    aws_iam_role_arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-local-source-proxy-${EKS_CLUSTER_NAME}
ootb_templates:
  iaas_auth: true   # use IaaS (IRSA) auth for registry access in the templates
springboot_conventions:
  autoConfigureActuators: true
tap_gui:
  app_config:
    auth:
      environment: development
      session:
        secret: opensesami
      allowGuestAccess: true
    techdocs:
      builder: 'external'
      publisher:
        type: 'awsS3'
        awsS3:
          bucketName: ${TECHDOCS_BUCKET_NAME}
          region: ${AWS_REGION}
    backend:
      database:
        client: pg
        connection:
          # Backslash keeps these literal in the (unquoted) heredoc so the
          # TAP GUI resolves them from its own environment at runtime
          host: \${TAP_GUI_DB_POSTGRESQL_SERVICE_HOST}
          port: \${TAP_GUI_DB_POSTGRESQL_SERVICE_PORT}
          user: tap-gui
          password: \${POSTGRES_PASSWORD}
    customize:
      features:
        supplyChain:
          enableTriageUI: true
metadata_store:
  ns_for_export_app_cert: "*"
  app_service_type: ClusterIP
  pg_req_cpu: "200m"
  pg_req_memory: "200Mi"
namespace_provisioner:
  aws_iam_role_arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-workload-${EKS_CLUSTER_NAME}
package_overlays:
- name: ootb-templates
  secrets:
  - name: ootb-templates-ecr-repo-template
  - name: ootb-templates-overlay-kaniko-build
- name: ootb-supply-chain-testing-scanning
  secrets:
  - name: ootb-supply-chain-add-ecr-repo-template
- name: cert-manager
  secrets:
  - name: cert-manager-cluster-issuer
  - name: cert-manager-irsa
- name: tap-gui
  secrets:
  - name: tap-gui-db
  - name: tap-gui-irsa
- name: cnrs
  secrets:
  - name: cnrs-autocreate-cluster-domain-claims
  - name: cnrs-gc
cnrs:
  allow_manual_configmap_update: false
  # The settings below are for saving resources
  lite:
    enable: true
  pdb:
    enable: false
cartographer:
  cartographer:
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
crossplane:
  orphan_resources: false
  resourcesCrossplane:
    requests:
      cpu: 100m
      memory: 200Mi
  resourcesRBACManager:
    requests:
      cpu: 100m
      memory: 200Mi
excluded_packages:
- sso.apps.tanzu.vmware.com
- api-portal.tanzu.vmware.com
---
EOF
# Install the TAP meta-package (version 1.9.0) with the values file generated above,
# into the tap-install namespace; the CLI waits for the PackageInstalls to reconcile.
tanzu package install tap -p tap.tanzu.vmware.com -v 1.9.0 --values-file tap-values.yaml -n tap-install
| kapp: Error: waiting on reconcile packageinstall/tap-gui (packaging.carvel.dev/v1alpha1) namespace: tap-install:
| Finished unsuccessfully (Reconcile failed: (message: kapp: Error: waiting on reconcile secret/metadata-store-access-token (v1) namespace: tap-gui:
| Errored:
| Getting resource secret/metadata-store-access-token (v1) namespace: tap-gui:
| API server says: secrets "metadata-store-access-token" not found (reason: NotFound)))
| Deploying: Error (see .status.usefulErrorMessage for details)
$ kubectl get pkgi -n tap-install
NAME PACKAGE NAME PACKAGE VERSION DESCRIPTION AGE
accelerator accelerator.apps.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m11s
amr-observer amr-observer.apps.tanzu.vmware.com 0.5.0 Reconcile succeeded 3m11s
api-auto-registration apis.apps.tanzu.vmware.com 0.5.0 Reconcile succeeded 3m11s
app-scanning app-scanning.apps.tanzu.vmware.com 0.4.0 Reconcile succeeded 3m14s
appliveview backend.appliveview.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m11s
appliveview-apiserver apiserver.appliveview.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m52s
appliveview-connector connector.appliveview.tanzu.vmware.com 1.9.1 Reconcile succeeded 4m38s
appliveview-conventions conventions.appliveview.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m26s
bitnami-services bitnami.services.tanzu.vmware.com 0.5.0-rc.3 Reconcile succeeded 3m33s
buildservice buildservice.tanzu.vmware.com 1.13.0 Reconcile succeeded 4m38s
cartographer cartographer.tanzu.vmware.com 0.9.0 Reconcile succeeded 3m52s
cartographer-conventions cartographer.conventions.apps.tanzu.vmware.com 0.8.0 Reconcile succeeded 3m52s
cert-manager cert-manager.tanzu.vmware.com 2.7.2 Reconcile succeeded 4m38s
cnrs cnrs.tanzu.vmware.com 2.5.3 Reconcile succeeded 3m11s
contour contour.tanzu.vmware.com 2.2.0 Reconcile succeeded 3m52s
crossplane crossplane.tanzu.vmware.com 0.5.0-rc.3 Reconcile succeeded 4m38s
developer-conventions developer-conventions.tanzu.vmware.com 0.16.1 Reconcile succeeded 3m26s
fluxcd-source-controller fluxcd.source.controller.tanzu.vmware.com 1.1.2+tanzu.1 Reconcile succeeded 4m38s
local-source-proxy local-source-proxy.apps.tanzu.vmware.com 0.2.1 Reconcile succeeded 4m38s
metadata-store metadata-store.apps.tanzu.vmware.com 1.9.0 Reconcile succeeded 3m11s
namespace-provisioner namespace-provisioner.apps.tanzu.vmware.com 0.6.2 Reconcile succeeded 3m29s
ootb-delivery-basic ootb-delivery-basic.tanzu.vmware.com 0.16.1 Reconcile succeeded 3m14s
ootb-supply-chain-testing-scanning ootb-supply-chain-testing-scanning.tanzu.vmware.com 0.16.1 Reconcile succeeded 3m14s
ootb-templates ootb-templates.tanzu.vmware.com 0.16.1 Reconcile succeeded 3m26s
policy-controller policy.apps.tanzu.vmware.com 1.6.4 Reconcile succeeded 3m52s
scanning scanning.apps.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m52s
service-bindings servicebinding.tanzu.vmware.com 0.12.0-rc.2 Reconcile succeeded 3m52s
services-toolkit services-toolkit.tanzu.vmware.com 0.14.0-rc.5 Reconcile succeeded 3m52s
source-controller controller.source.apps.tanzu.vmware.com 0.9.0 Reconcile succeeded 3m52s
spring-boot-conventions spring-boot-conventions.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m26s
tap tap.tanzu.vmware.com 1.9.0 Reconcile succeeded 4m49s
tap-auth tap-auth.tanzu.vmware.com 1.1.0 Reconcile succeeded 4m38s
tap-gui tap-gui.tanzu.vmware.com 1.9.1 Reconcile succeeded 3m11s
tap-telemetry tap-telemetry.tanzu.vmware.com 0.7.0 Reconcile succeeded 4m47s
tekton-pipelines tekton.tanzu.vmware.com 0.50.3+tanzu.4 Reconcile succeeded 4m38s
$ kubectl get svc -n tanzu-system-ingress envoy
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
envoy LoadBalancer 172.20.115.207 adcc2ceb7eb054d08aa1d2951c945258-32227aa03228e7ff.elb.ap-northeast-1.amazonaws.com 80:32598/TCP,443:32194/TCP 3m24s
$ kubectl get httpproxy -A
NAMESPACE NAME FQDN TLS SECRET STATUS STATUS DESCRIPTION
api-auto-registration api-auto-registration-controller api-auto-registration.aws.maki.lol api-auto-registration-cert valid Valid HTTPProxy
metadata-store amr-cloudevent-handler-ingress amr-cloudevent-handler.aws.maki.lol amr-cloudevent-handler-ingress-cert valid Valid HTTPProxy
metadata-store amr-graphql-ingress amr-graphql.aws.maki.lol amr-ingress-cert valid Valid HTTPProxy
metadata-store metadata-store-ingress metadata-store.aws.maki.lol ingress-cert valid Valid HTTPProxy
tap-gui tap-gui tap-gui.aws.maki.lol tap-gui-cert valid Valid HTTPProxy
# Look up the URI of the pre-created ECR repository that holds the full-deps package repo
ECR_REPO=$(aws ecr describe-repositories --region ap-northeast-1 --query 'repositories[?repositoryName==`full-deps-package-repo`].repositoryUri' --output text)
# Authenticate docker against ECR (password piped via stdin to avoid exposing it in argv)
aws ecr get-login-password --region ap-northeast-1 | docker login ${ECR_REPO} -u AWS --password-stdin
# Register the full dependencies package repository (tag 1.9.0) with the tanzu CLI
tanzu package repository add full-deps-package-repo \
--url ${ECR_REPO}:1.9.0 \
--namespace tap-install
# Values for the full dependencies package; unquoted EOF so the account/region/cluster
# variables are expanded when the file is written
cat << EOF > full-deps-values.yaml
---
kp_default_repository: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/tap-build-service
kp_default_repository_aws_iam_role_arn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/tap-build-service-${EKS_CLUSTER_NAME}
enable_ubi_builder: true
---
EOF
# -v "> 0.0.0" selects the newest version available in the package repository
tanzu package install full-deps -p full-deps.buildservice.tanzu.vmware.com -v "> 0.0.0" -n tap-install --values-file full-deps-values.yaml
$ kubectl get clusterbuilder
NAME LATESTIMAGE READY UPTODATE
base-jammy 532912407632.dkr.ecr.ap-northeast-1.amazonaws.com/tap-build-service:base-jammy-builder@sha256:e58c3005c9f522aa8a6b7dd797161d51e175561e04e77ca1ad06bd4ea3e38534 True True
default 532912407632.dkr.ecr.ap-northeast-1.amazonaws.com/tap-build-service:default-builder@sha256:05768d1b6e663b6acceeac0e0a65b9fcb7eff92b0bed2adbaf3938a6b4c67cab True True
full-jammy 532912407632.dkr.ecr.ap-northeast-1.amazonaws.com/tap-build-service:full-jammy-builder@sha256:4dd0a9f60198ad568e2af2db0b1424ed750ed7b38677cf3592d78cdf79498a69 True True
standard-ubi-8 532912407632.dkr.ecr.ap-northeast-1.amazonaws.com/tap-build-service:standard-ubi-8-builder@sha256:5464872ad9d22c49ac68b921be984cee3323f998a4fd6aaa6cbc730705bd2fd1 True True
tiny-jammy 532912407632.dkr.ecr.ap-northeast-1.amazonaws.com/tap-build-service:tiny-jammy-builder@sha256:d448d76a140f26f0c03a2fd098d37d238a2b8718e56e6cf1bf4fa18416f10da4 True True
kubectl create ns apps
# Opt the namespace into the namespace provisioner (creates service accounts, RBAC, etc.)
kubectl label namespaces apps apps.tanzu.vmware.com/tap-ns=""
# Tekton Pipeline selected by the testing supply chain via the
# apps.tanzu.vmware.com/pipeline=test label; this one only echoes 'skip',
# so the test stage always succeeds. Heredoc is quoted ('EOF') so the
# $(params.*) references are NOT expanded by the shell.
# NOTE(review): indentation reconstructed — the transcript had it stripped.
kubectl apply -f - -n apps << 'EOF'
---
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
  name: skip-test-pipeline
  labels:
    apps.tanzu.vmware.com/pipeline: test
    apps.tanzu.vmware.com/language: skip
spec:
  params:
  - name: source-url
  - name: source-revision
  tasks:
  - name: test
    params:
    - name: source-url
      value: $(params.source-url)
    - name: source-revision
      value: $(params.source-revision)
    taskSpec:
      params:
      - name: source-url
      - name: source-revision
      steps:
      - name: test
        image: alpine
        script: |-
          echo 'skip'
---
EOF
# ScanPolicy used by the scanning steps of the supply chain. The embedded Rego
# denies only findings rated "UnknownSeverity", so known CVEs of any severity pass.
# The 'regoFile: |' block scalar requires its content to be indented.
# NOTE(review): indentation reconstructed — the transcript had it stripped.
kubectl apply -f - -n apps << 'EOF'
---
apiVersion: scanning.apps.tanzu.vmware.com/v1beta1
kind: ScanPolicy
metadata:
  labels:
    app.kubernetes.io/part-of: enable-in-gui
  name: scan-policy
spec:
  regoFile: |
    package main

    # Accepted Values: "Critical", "High", "Medium", "Low", "Negligible", "UnknownSeverity"
    notAllowedSeverities := ["UnknownSeverity"]
    ignoreCves := []

    contains(array, elem) = true {
      array[_] = elem
    } else = false { true }

    isSafe(match) {
      severities := { e | e := match.ratings.rating.severity } | { e | e := match.ratings.rating[_].severity }
      some i
      fails := contains(notAllowedSeverities, severities[i])
      not fails
    }

    isSafe(match) {
      ignore := contains(ignoreCves, match.id)
      ignore
    }

    deny[msg] {
      comps := { e | e := input.bom.components.component } | { e | e := input.bom.components.component[_] }
      some i
      comp := comps[i]
      vulns := { e | e := comp.vulnerabilities.vulnerability } | { e | e := comp.vulnerabilities.vulnerability[_] }
      some j
      vuln := vulns[j]
      ratings := { e | e := vuln.ratings.rating.severity } | { e | e := vuln.ratings.rating[_].severity }
      not isSafe(vuln)
      msg = sprintf("CVE %s %s %s", [comp.name, vuln.id, ratings])
    }
---
EOF
# Create a sample workload from a public git repo; the has-tests=true label routes it
# through the testing+scanning supply chain (which matches the skip-test Pipeline and
# the scan-policy ScanPolicy in the apps namespace). -y skips the confirmation prompt.
tanzu apps workload apply hello-nodejs \
--app hello-nodejs \
--git-repo https://github.com/making/hello-nodejs \
--git-branch master \
--type web \
--label apps.tanzu.vmware.com/has-tests=true \
-n apps \
-y