This commit is contained in:
jooho
2021-12-26 17:08:41 +09:00
parent d6e543eb24
commit 628f2ace5a
97 changed files with 3958 additions and 0 deletions

View File

@@ -0,0 +1,17 @@
# ConfigMap: shared hostnames/URIs for the backend Spring services.
apiVersion: v1
kind: ConfigMap
metadata:
  name: backend-configmap
  labels:
    env: production
    tier: backend
    app: configmap
    name: backend-configmap
data:
  # Hostnames assume everything is deployed in the "default" namespace.
  EUREKA_INSTANCE_HOSTNAME: "discovery.default.svc.cluster.local"
  APIGATEWAY_HOSTNAME: "apigateway.default.svc.cluster.local"
  SPRING_CLOUD_CONFIG_URI: "http://config.default.svc.cluster.local:8888"
  MYSQL_HOSTNAME: "mysql.default.svc.cluster.local"
  RABBITMQ_HOSTNAME: "rabbitmq.default.svc.cluster.local"
  ZIPKIN_HOSTNAME: "zipkin.default.svc.cluster.local"

View File

@@ -0,0 +1,12 @@
# ConfigMap: values shared by every workload (app home dir, timezone).
apiVersion: v1
kind: ConfigMap
metadata:
  name: common-configmap
  labels:
    env: production
    tier: backend
    app: configmap
    name: common-configmap
data:
  APP_HOME: "/srv/nfs"
  TZ: "Asia/Seoul"

View File

@@ -0,0 +1,25 @@
# ConfigMap: MySQL bootstrap settings plus per-role my.cnf fragments
# consumed by the mysql StatefulSet init container.
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-configmap
  labels:
    env: production
    tier: database
    app: configmap
    name: mysql-configmap
data:
  mysql-user: "msaportal"
  mysql-database: "msaportal"
  mysql-allow-empty-password: "1"
  mysql-root-host: "%"
  TZ: "Asia/Seoul"
  # Config for ordinal 0 (the replication master): binary logging on.
  master.cnf: |
    [mysqld]
    log-bin
    character-set-server=utf8
    collation-server=utf8_general_ci
  # Config for the remaining ordinals (replicas): read-only.
  slave.cnf: |
    [mysqld]
    super-read-only
    character-set-server=utf8
    collation-server=utf8_general_ci

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,16 @@
# PVC: 5Gi NFS-backed storage for the MySQL data directory.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
  labels:
    env: production
    tier: database
    app: mysql
    name: mysql-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: nfs

View File

@@ -0,0 +1,12 @@
# Secret: MySQL application-user password.
# NOTE(review): base64 is encoding, not encryption — avoid committing
# real credentials to VCS; prefer an external secret store.
apiVersion: v1
kind: Secret
metadata:
  name: mysql-secret
  labels:
    env: production
    tier: database
    app: secret
    name: mysql-secret
type: Opaque
data:
  mysql-password: bXNhcG9ydGFs  # "msaportal" base64 encoded

View File

@@ -0,0 +1,20 @@
# Service: exposes the MySQL pods on port 3306 inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    env: production
    tier: database
    app: mysql
    name: mysql-service
spec:
  selector:
    env: production
    tier: database
    app: mysql
    name: mysql-pod
  ports:
    - name: http-3306
      protocol: TCP
      port: 3306
      targetPort: 3306

View File

@@ -0,0 +1,171 @@
# StatefulSet: replicated MySQL following the upstream Kubernetes
# "run a replicated stateful application" pattern — ordinal 0 is the
# master; higher ordinals clone from their predecessor via xtrabackup
# and then follow the master.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-statefulset
  labels:
    env: production
    tier: database
    app: mysql
    name: mysql-statefulset
spec:
  serviceName: mysql
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: database
      app: mysql
      name: mysql-pod
  template:
    metadata:
      labels:
        env: production
        tier: database
        app: mysql
        name: mysql-pod
    spec:
      initContainers:
        # Generates a unique server-id from the pod ordinal and copies
        # the master or slave my.cnf fragment into the shared conf dir.
        - name: init-mysql
          image: mysql:5.7
          command:
            - bash
            - "-c"
            - |
              set -ex
              [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              echo [mysqld] > /mnt/conf.d/server-id.cnf
              echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
              if [[ $ordinal -eq 0 ]]; then
                cp /mnt/mysql-configmap/master.cnf /mnt/conf.d/
              else
                cp /mnt/mysql-configmap/slave.cnf /mnt/conf.d/
              fi
          volumeMounts:
            - name: mysql-conf
              mountPath: /mnt/conf.d
            - name: mysql-configmap
              mountPath: /mnt/mysql-configmap
        # On first start of a non-zero ordinal, streams a backup from the
        # previous replica and prepares it for use as the data directory.
        - name: clone-mysql
          image: gcr.io/google-samples/xtrabackup:1.0
          command:
            - bash
            - "-c"
            - |
              set -ex
              [[ -d /var/lib/mysql/mysql ]] && exit 0
              [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              [[ $ordinal -eq 0 ]] && exit 0
              ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
              xtrabackup --prepare --target-dir=/var/lib/mysql
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: mysql-conf
              mountPath: /etc/mysql/conf.d
      containers:
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_USER
              valueFrom:
                configMapKeyRef:
                  key: mysql-user
                  name: mysql-configmap
            - name: MYSQL_DATABASE
              valueFrom:
                configMapKeyRef:
                  key: mysql-database
                  name: mysql-configmap
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: mysql-password
                  name: mysql-secret
            - name: MYSQL_ALLOW_EMPTY_PASSWORD
              valueFrom:
                configMapKeyRef:
                  key: mysql-allow-empty-password
                  name: mysql-configmap
            - name: MYSQL_ROOT_HOST
              valueFrom:
                configMapKeyRef:
                  key: mysql-root-host
                  name: mysql-configmap
          ports:
            - name: mysql
              containerPort: 3306
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: mysql-conf
              mountPath: /etc/mysql/conf.d
          startupProbe:
            exec:
              command: ["mysqladmin", "ping"]
            initialDelaySeconds: 10
            periodSeconds: 2
            timeoutSeconds: 1
          readinessProbe:
            exec:
              command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
            initialDelaySeconds: 30
            periodSeconds: 2
            timeoutSeconds: 1
        # Sidecar: configures replication from the cloned position, then
        # serves backups to the next replica over port 3307.
        - name: xtrabackup
          image: gcr.io/google-samples/xtrabackup:1.0
          ports:
            - name: xtrabackup
              containerPort: 3307
          command:
            - bash
            - "-c"
            - |
              set -ex
              cd /var/lib/mysql
              if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
                cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
                rm -f xtrabackup_slave_info xtrabackup_binlog_info
              elif [[ -f xtrabackup_binlog_info ]]; then
                [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
                rm -f xtrabackup_binlog_info xtrabackup_slave_info
                echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
              fi
              if [[ -f change_master_to.sql.in ]]; then
                echo "Waiting for mysqld to be ready (accepting connections)"
                until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
                echo "Initializing replication from clone position"
                mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                  MASTER_HOST='mysql-0.mysql', \
                  MASTER_USER='root', \
                  MASTER_PASSWORD='', \
                  MASTER_CONNECT_RETRY=10; \
                  START SLAVE;" || exit 1
                mv change_master_to.sql.in change_master_to.sql.orig
              fi
              exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
                "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: mysql-conf
              mountPath: /etc/mysql/conf.d
      volumes:
        - name: mysql-conf
          emptyDir: {}
        - name: mysql-configmap
          configMap:
            name: mysql-configmap
        - name: mysql-data
          persistentVolumeClaim:
            claimName: mysql-pvc

View File

@@ -0,0 +1,18 @@
# Ingress: routes the Jenkins UI to the jenkins Service.
# NOTE(review): networking.k8s.io/v1beta1 was removed in Kubernetes 1.22;
# migrate to networking.k8s.io/v1 when the cluster is upgraded.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: jenkins-ingress
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-ingress
spec:
  rules:
    - host: <젠킨스 도메인>  # TODO: replace with the real Jenkins domain
      http:
        paths:
          - path: /
            backend:
              serviceName: jenkins
              servicePort: 8080

View File

@@ -0,0 +1,16 @@
# PVC: 5Gi NFS-backed storage for /var/jenkins_home.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: nfs

View File

@@ -0,0 +1,55 @@
# RBAC for Jenkins: ServiceAccount + namespace-scoped Role allowing it
# to manage deployments and agent pods, bound by a RoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-sa
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: jenkins
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-r
rules:
  - apiGroups: ["extensions", "apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jenkins
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-rb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: jenkins
subjects:
  - kind: ServiceAccount
    name: jenkins
    # ServiceAccount subjects require an explicit namespace
    # (consistent with the nfs-provisioner RBAC in this commit).
    namespace: default

View File

@@ -0,0 +1,37 @@
# Services: Jenkins web UI (8080) and JNLP agent port (50000).
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-service
spec:
  ports:
    - port: 8080
      targetPort: 8080
  selector:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-pod
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins-jnlp
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-jnlp-service
spec:
  ports:
    - port: 50000
      targetPort: 50000
  selector:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-pod

View File

@@ -0,0 +1,43 @@
# StatefulSet: single Jenkins controller with its home on the NFS PVC.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: jenkins-statefulset
  labels:
    env: production
    tier: cicd
    app: jenkins
    name: jenkins-statefulset
spec:
  serviceName: jenkins
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: cicd
      app: jenkins
      name: jenkins-pod
  template:
    metadata:
      labels:
        env: production
        tier: cicd
        app: jenkins
        name: jenkins-pod
    spec:
      serviceAccountName: jenkins
      containers:
        - name: jenkins
          image: jenkins/jenkins:lts
          imagePullPolicy: Always
          ports:
            - name: http-port
              containerPort: 8080
            - name: jnlp-port
              containerPort: 50000
          volumeMounts:
            - name: jenkins-volume
              mountPath: /var/jenkins_home
      volumes:
        - name: jenkins-volume
          persistentVolumeClaim:
            claimName: jenkins-pvc

View File

@@ -0,0 +1,44 @@
# Deployment: single-node Elasticsearch for the ELK logging stack.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-deployment
  labels:
    env: production
    tier: backend
    app: elasticsearch
    name: elasticsearch-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: backend
      app: elasticsearch
      name: elasticsearch-pod
  template:
    metadata:
      labels:
        env: production
        tier: backend
        app: elasticsearch
        name: elasticsearch-pod
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1
          imagePullPolicy: Always
          env:
            # Disables cluster formation — single-node development mode.
            - name: discovery.type
              value: single-node
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: TZ
              valueFrom:
                configMapKeyRef:
                  name: common-configmap
                  key: TZ
          ports:
            - containerPort: 9200
              name: http
            - containerPort: 9300
              name: tcp

View File

@@ -0,0 +1,19 @@
# Service: Elasticsearch REST API on port 9200.
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    env: production
    tier: backend
    app: elasticsearch
    name: elasticsearch-service
spec:
  selector:
    env: production
    tier: backend
    app: elasticsearch
    name: elasticsearch-pod
  ports:
    - port: 9200
      protocol: TCP
      targetPort: 9200

View File

@@ -0,0 +1,19 @@
# Equivalent to: kubectl create configmap kibana-configmap --from-file=config/kibana.yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-configmap
  labels:
    env: production
    tier: backend
    app: kibana
    name: kibana-configmap
data:
  kibana.yml: |
    server.name: kibana
    server.host: "0.0.0.0"
    ## X-Pack security credentials
    # xpack.monitoring.ui.container.elasticsearch.enabled: true
    # elasticsearch.username: elastic
    # elasticsearch.password: changeme

View File

@@ -0,0 +1,48 @@
# Deployment: Kibana UI wired to the in-cluster Elasticsearch.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-deployment
  labels:
    env: production
    tier: backend
    app: kibana
    name: kibana-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: backend
      app: kibana
      name: kibana-pod
  template:
    metadata:
      labels:
        env: production
        tier: backend
        app: kibana
        name: kibana-pod
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:7.10.1
          env:
            - name: ELASTICSEARCH_URL
              value: http://elasticsearch.default.svc.cluster.local:9200
            - name: ELASTICSEARCH_HOSTS
              value: '["http://elasticsearch.default.svc.cluster.local:9200"]'
            - name: TZ
              valueFrom:
                configMapKeyRef:
                  name: common-configmap
                  key: TZ
          ports:
            - containerPort: 5601
              name: http
          volumeMounts:
            # (fixed "volumne" typo; volume name is pod-internal)
            - name: kibana-config-volume
              mountPath: /usr/share/kibana/config/
      volumes:
        - name: kibana-config-volume
          configMap:
            name: kibana-configmap

View File

@@ -0,0 +1,18 @@
# Ingress: routes the Kibana UI to the kibana Service.
# NOTE(review): networking.k8s.io/v1beta1 was removed in Kubernetes 1.22;
# migrate to networking.k8s.io/v1 when the cluster is upgraded.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: kibana-ingress
  labels:
    env: production
    tier: logging
    app: kibana
    name: kibana-ingress
spec:
  rules:
    - host: <ELK Kibana 도메인>  # TODO: replace with the real Kibana domain
      http:
        paths:
          - path: /
            backend:
              serviceName: kibana
              servicePort: 5601

View File

@@ -0,0 +1,19 @@
# Service: Kibana UI on port 5601.
apiVersion: v1
kind: Service
metadata:
  name: kibana
  labels:
    env: production
    tier: backend
    app: kibana
    name: kibana-service
spec:
  selector:
    env: production
    tier: backend
    app: kibana
    name: kibana-pod
  ports:
    - protocol: TCP
      port: 5601
      targetPort: 5601

View File

@@ -0,0 +1,36 @@
# Equivalent to: kubectl create configmap logstash-configmap --from-file=config/logstash.yml --from-file=pipeline/logstash.conf
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  labels:
    env: production
    tier: backend
    app: logstash
    name: logstash-configmap
data:
  # Pipeline: JSON-lines over TCP in (LogstashTcpSocketAppender), Elasticsearch out.
  logstash.conf: |
    input {
      tcp {
        port => 8088
        codec => json_lines
      }
    }
    output {
      elasticsearch {
        hosts => "http://elasticsearch.default.svc.cluster.local:9200"
      }
    }
  logstash.yml: |-
    # http.host: "0.0.0.0"
    http.host: "127.0.0.1"
    path.config: /usr/share/logstash/pipeline
    # pipeline.workers: 2
    ## X-Pack security credentials
    #
    # xpack.monitoring.enabled: true
    # xpack.monitoring.elasticsearch.username: elastic
    # xpack.monitoring.elasticsearch.password: changeme

View File

@@ -0,0 +1,55 @@
# Deployment: Logstash with config and pipeline mounted from the ConfigMap.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
  labels:
    env: production
    tier: backend
    app: logstash
    name: logstash-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: backend
      app: logstash
      name: logstash-pod
  template:
    metadata:
      labels:
        env: production
        tier: backend
        app: logstash
        name: logstash-pod
    spec:
      containers:
        - name: logstash
          image: docker.elastic.co/logstash/logstash:7.10.1
          ports:
            - containerPort: 8088
          resources:
            requests:
              cpu: 300m
              memory: 512Mi
            limits:
              cpu: 300m
              memory: 512Mi
          volumeMounts:
            - name: logstash-config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
      volumes:
        - name: logstash-config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf

View File

@@ -0,0 +1,19 @@
# Service: Logstash TCP input on port 8088.
apiVersion: v1
kind: Service
metadata:
  name: logstash
  labels:
    env: production
    tier: backend
    app: logstash
    name: logstash-service
spec:
  selector:
    env: production
    tier: backend
    app: logstash
    name: logstash-pod
  ports:
    - protocol: TCP
      port: 8088
      targetPort: 8088

View File

@@ -0,0 +1,46 @@
# Deployment: nfs-client-provisioner providing dynamic PVs from the
# NFS export on the control-plane node (192.168.56.21:/srv/nfs).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner-deployment
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-deployment
spec:
  selector:
    matchLabels:
      env: production
      tier: storage
      app: nfs
      name: nfs-provisioner-pod
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        env: production
        tier: storage
        app: nfs
        name: nfs-provisioner-pod
    spec:
      serviceAccountName: nfs-provisioner-sa
      containers:
        - name: nfs-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          env:
            # Must match the "nfs" StorageClass's provisioner field.
            - name: PROVISIONER_NAME
              value: nfs-provisioner
            - name: NFS_SERVER
              value: 192.168.56.21
            - name: NFS_PATH
              value: /srv/nfs
          volumeMounts:
            - name: nfs-volume
              mountPath: /persistentvolumes
      volumes:
        - name: nfs-volume
          nfs:
            server: 192.168.56.21
            path: /srv/nfs

View File

@@ -0,0 +1,13 @@
#!/bin/sh
# Sets up an NFS client and mounts the shared directory.
# Refresh package lists so the install below does not fail on stale indexes.
sudo apt update
# Install the NFS client package.
sudo apt install -y nfs-common
# Create the mount point (-p: no error if it already exists).
sudo mkdir -p /srv/nfs
# Mount the shared directory.
sudo mount -t nfs 192.168.56.21:/srv/nfs /srv/nfs
# Verify the mount.
df -h

View File

@@ -0,0 +1,22 @@
#!/bin/sh
# Sets up an NFS server exporting /srv/nfs to any host.
# Refresh package lists so the install below does not fail on stale indexes.
sudo apt update
# Install the NFS server package.
sudo apt install -y nfs-kernel-server
# Create the shared directory (-p: no error if it already exists).
sudo mkdir -p /srv/nfs
# Grant permissions on the shared directory.
sudo chown -R nobody:nogroup /srv/nfs
sudo chmod 777 /srv/nfs
# Configure the export.
# NOTE(review): this overwrites /etc/exports; use `tee -a` if other
# exports must be preserved.
echo "/srv/nfs *(rw,sync,no_subtree_check)" | sudo tee /etc/exports
# Restart the NFS server and check its status.
sudo systemctl restart nfs-kernel-server
sudo systemctl status nfs-kernel-server
# Allow the NFS port through the firewall.
sudo iptables -A INPUT -p tcp --dport 2049 -j ACCEPT
sudo iptables -A INPUT -p udp --dport 2049 -j ACCEPT

View File

@@ -0,0 +1,89 @@
# RBAC for the nfs-client-provisioner: cluster-wide PV/PVC/StorageClass
# access plus a namespaced Role for leader-election endpoints.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-provisioner-sa
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-sa
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-cr
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-cr
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-crb
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-crb
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner-sa
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-cr
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-r
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-r
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-rb
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs-provisioner-rb
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner-sa
    namespace: default
roleRef:
  kind: Role
  name: nfs-provisioner-r
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,12 @@
# StorageClass: dynamic NFS volumes via the nfs-provisioner deployment.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
  labels:
    env: production
    tier: storage
    app: nfs
    name: nfs
provisioner: nfs-provisioner
parameters:
  # Deleted PVC data is removed rather than archived.
  archiveOnDelete: "false"

View File

@@ -0,0 +1,55 @@
# Deployment: RabbitMQ broker with the management UI enabled.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rabbitmq-deployment
  labels:
    env: production
    tier: backend
    app: rabbitmq
    name: rabbitmq-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: backend
      app: rabbitmq
      name: rabbitmq-pod
  template:
    metadata:
      labels:
        env: production
        tier: backend
        app: rabbitmq
        name: rabbitmq-pod
    spec:
      containers:
        - name: rabbitmq
          image: rabbitmq:management
          ports:
            - containerPort: 5672
              name: http-5672
            - containerPort: 15672
              name: http-15672
          env:
            - name: TZ
              valueFrom:
                configMapKeyRef:
                  name: common-configmap
                  key: TZ
          startupProbe:
            httpGet:
              path: /
              port: 15672
            initialDelaySeconds: 10
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 36
          readinessProbe:
            httpGet:
              path: /
              port: 15672
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3

View File

@@ -0,0 +1,18 @@
# Ingress: routes the RabbitMQ management UI to the rabbitmq Service.
# NOTE(review): networking.k8s.io/v1beta1 was removed in Kubernetes 1.22;
# migrate to networking.k8s.io/v1 when the cluster is upgraded.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rabbitmq-ingress
  labels:
    env: production
    tier: backend
    app: rabbitmq
    name: rabbitmq-ingress
spec:
  rules:
    - host: <RabbitMQ 도메인>  # TODO: replace with the real RabbitMQ domain
      http:
        paths:
          - path: /
            backend:
              serviceName: rabbitmq
              servicePort: 15672

View File

@@ -0,0 +1,24 @@
# Service: RabbitMQ AMQP (5672) and management UI (15672).
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq
  labels:
    env: production
    tier: backend
    app: rabbitmq
    name: rabbitmq-service
spec:
  selector:
    env: production
    tier: backend
    app: rabbitmq
    name: rabbitmq-pod
  ports:
    - name: http-5672
      protocol: TCP
      port: 5672
      targetPort: 5672
    - name: http-15672
      protocol: TCP
      port: 15672
      targetPort: 15672

View File

@@ -0,0 +1,16 @@
# PVC: 5Gi Cinder-backed (OpenStack) storage for the backend services.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backend-pvc
  labels:
    env: production
    tier: backend
    app: pvc
    name: backend-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: cinder

View File

@@ -0,0 +1,16 @@
# PVC: 5Gi NFS-backed storage for the backend services (Vagrant env).
# NOTE(review): shares the name "backend-pvc" with the Cinder variant —
# only one can exist per namespace; they belong to different environments.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backend-pvc
  labels:
    env: production
    tier: backend
    app: pvc
    name: backend-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
  storageClassName: nfs

74
k8s/environments/vagrant/Vagrantfile vendored Normal file
View File

@@ -0,0 +1,74 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Defines a 3-node Kubernetes lab (1 control-plane + 2 workers), each with
# an extra 10 GiB disk attached for Ceph. The three stanzas of the original
# were identical except for name/IP/disk, so they are driven from one table;
# the inner block variable no longer shadows the outer `config`.
NODES = [
  { name: "control-plane1", ip: "192.168.56.21", disk: "./.disk/ceph1.vdi" },
  { name: "worker1",        ip: "192.168.56.22", disk: "./.disk/ceph2.vdi" },
  { name: "worker2",        ip: "192.168.56.23", disk: "./.disk/ceph3.vdi" },
].freeze

Vagrant.configure("2") do |config|
  NODES.each do |node|
    config.vm.define node[:name] do |machine|
      machine.vm.box = "ubuntu/focal64"
      machine.vm.provider "virtualbox" do |vb|
        vb.name = node[:name]
        vb.cpus = 2
        vb.memory = 4000
        # Create the Ceph data disk once, then attach it on every `up`.
        unless File.exist?(node[:disk])
          vb.customize ['createmedium', 'disk', '--filename', node[:disk], '--size', 10240]
        end
        vb.customize ['storageattach', :id, '--storagectl', 'SCSI', '--port', 2, '--device', 0,
                      '--type', 'hdd', '--medium', node[:disk]]
      end
      machine.vm.hostname = node[:name]
      machine.vm.network "private_network", ip: node[:ip], nic_type: "virtio"
      # Only the control-plane forwards the API server and HTTP ingress.
      if node[:name] == "control-plane1"
        machine.vm.network "forwarded_port", guest: 6443, host: 6443, protocol: "tcp"
        machine.vm.network "forwarded_port", guest: 80, host: 8080, protocol: "tcp"
      end
    end
  end

  # Hostmanager plugin
  # vagrant plugin install vagrant-hostmanager
  config.hostmanager.enabled = true
  config.hostmanager.manage_guest = true

  # Disable Synced Folder
  config.vm.synced_folder ".", "/vagrant", disabled: true

  # Enable SSH Password Authentication and switch apt to a Korean mirror.
  config.vm.provision "shell", inline: <<-SHELL
    sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
    sed -i 's/archive.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
    sed -i 's/security.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
    systemctl restart ssh
  SHELL
end

View File

@@ -0,0 +1,53 @@
# Deployment: Zipkin distributed-tracing UI/collector.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zipkin-deployment
  labels:
    env: production
    tier: backend
    app: zipkin
    name: zipkin-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      env: production
      tier: backend
      app: zipkin
      name: zipkin-pod
  template:
    metadata:
      labels:
        env: production
        tier: backend
        app: zipkin
        name: zipkin-pod
    spec:
      containers:
        - name: zipkin
          image: openzipkin/zipkin
          ports:
            - containerPort: 9411
              name: http-9411
          env:
            - name: TZ
              valueFrom:
                configMapKeyRef:
                  name: common-configmap
                  key: TZ
          startupProbe:
            httpGet:
              path: /zipkin/
              port: 9411
            initialDelaySeconds: 10
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 36
          readinessProbe:
            httpGet:
              path: /zipkin/
              port: 9411
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3

View File

@@ -0,0 +1,18 @@
# Ingress: routes the Zipkin UI to the zipkin Service.
# Uses networking.k8s.io/v1beta1 for consistency with the other Ingresses
# in this commit (was extensions/v1beta1; same schema, deprecated earlier).
# NOTE(review): v1beta1 was removed in Kubernetes 1.22; migrate to
# networking.k8s.io/v1 when the cluster is upgraded.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: zipkin-ingress
  labels:
    env: production
    tier: backend
    app: zipkin
    name: zipkin-ingress
spec:
  rules:
    - host: <Zipkin 도메인>  # TODO: replace with the real Zipkin domain
      http:
        paths:
          - path: /
            backend:
              serviceName: zipkin
              servicePort: 9411

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: zipkin
labels:
env: production
tier: backend
app: zipkin
name: zipkin-service
spec:
selector:
env: production
tier: backend
app: zipkin
name: zipkin-pod
ports:
- name: http-9411
protocol: TCP
port: 9411
targetPort: 9411