시험)
2-1. nginx/톰캣/MySQL로 WAS-DB 2 tier나 WEB-WAS-DB 3 tier를 구성, 브라우저에서 www.<본인도메인>/exam 으로 접속했을때 DB연동을 확인하는 페이지가 뜨도록 하세요.
2-2. www.<본인도메인>/h 로 접속했을때 hostname을 출력하는 페이지가 뜨도록하세요.
2-3. www.<본인도메인>/ip 로 접속했을때 ip를 출력하는 페이지가 뜨도록하세요.
RDS의 포트는 33306으로 하세요.
EKS클러스터가 접근하는 컨테이너 레지스트리는 무조건 ECR을 사용하세요.
단, 베이스이미지는 61.254.18.30:5000/ipnginx, 61.254.18.30:5000/hnginx 나 oolralra/ipnginx, oolralra/hnginx를 사용하세요. 톰캣은 아무거나 쓰셔도 됩니다
# --- EKS cluster + AWS Load Balancer Controller setup (as-run transcript) ---
# Scale an existing managed node group (template, then the concrete example used).
eksctl scale nodegroup --cluster <클러스터이름> --name <노드그룹이름> --nodes <변경할노드수> --nodes-max <변경할최대노드수>
eksctl scale nodegroup --cluster pric --name mycng --nodes 2
# Create a private cluster: worker nodes live in the two private subnets and
# get no public IPs (--node-private-networking).
eksctl create cluster --vpc-private-subnets subnet-033d8f789eee8880d,subnet-0ca72dc02ff537a25 --name pric --region ap-northeast-2 --version 1.32 --nodegroup-name mycng --node-type t3.small --nodes 1 --nodes-min 1 --nodes-max 3 --node-private-networking
# Download the IAM policy the AWS Load Balancer Controller needs, then create it.
curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.12.0/docs/install/iam_policy.json
aws iam create-policy --policy-name AWSLoadBalancerControllerIAMPolicy --policy-document file://iam_policy.json
# Environment used by the commands below.
export CLUSTER_NAME=pric
export ACCOUNT_ID=651109015678
export VPC_ID=vpc-0a4d6d7b49d1ecafc
export REGION=ap-northeast-2
# Associate an OIDC provider so pods can assume IAM roles (IRSA).
eksctl utils associate-iam-oidc-provider --cluster $CLUSTER_NAME --approve
# Create the controller's ServiceAccount bound to the policy created above.
eksctl create iamserviceaccount --cluster=$CLUSTER_NAME --namespace=kube-system --name=aws-load-balancer-controller --role-name AmazonEKSLoadBalancerControllerRole --attach-policy-arn=arn:aws:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy --override-existing-serviceaccounts --approve
# Sanity checks: the SA exists; list add-on names for this Kubernetes version.
kubectl get sa -n kube-system | grep -i load
eksctl utils describe-addon-versions --kubernetes-version 1.32 | grep AddonName
# Install Helm 3, then the AWS Load Balancer Controller chart.
# serviceAccount.create=false reuses the IRSA-bound SA created above.
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
helm repo add eks https://aws.github.io/eks-charts
helm repo update
helm install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$CLUSTER_NAME --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller --set image.repository=602401143452.dkr.ecr.ap-northeast-2.amazonaws.com/amazon/aws-load-balancer-controller --set region=ap-northeast-2 --set vpcId=$VPC_ID
# Verify the release installed.
helm list -n kube-system
was
vi Dockerfile
# WAS image: Tomcat with the MySQL JDBC driver baked in for the DB-check page.
# NOTE(review): tomcat:latest is unpinned — consider pinning a specific tag for
# reproducible builds.
FROM tomcat:latest
# Download MySQL Connector/J 8.0.23 into Tomcat's shared lib directory.
# Fixed: destination was misnamed "my-connector-java-8.0.23.jar"; it now matches
# the real artifact name so the jar is identifiable inside the image.
RUN apt-get update && apt-get install -y wget && wget -O /usr/local/tomcat/lib/mysql-connector-java-8.0.23.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.23/mysql-connector-java-8.0.23.jar && apt-get clean
# Deploy index.jsp as the ROOT web application (WORKDIR creates the directory).
WORKDIR /usr/local/tomcat/webapps/ROOT
COPY index.jsp .
EXPOSE 8080
CMD ["catalina.sh", "run"]
vi index.jsp
<%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%>
<%@ page import="java.sql.*"%>
<%-- DB connectivity check page: opens a JDBC connection to RDS and prints a
     success message. Fixed: closing tag was </h2> for an <h1> element. --%>
<h1>DB</h1>
<%
Connection conn=null;
try{
    // RDS endpoint; the exam requires the non-default port 33306.
    String Url="jdbc:mysql://rapa-vpc-db.c7agowsw8ogt.ap-northeast-2.rds.amazonaws.com:33306/mydb";
    String Id="admin";
    String Pass="test123!";
    // Connector/J 8.x driver class. The old "com.mysql.jdbc.Driver" is
    // deprecated in 8.0 and only works via a compatibility alias.
    Class.forName("com.mysql.cj.jdbc.Driver");
    conn=DriverManager.getConnection(Url,Id,Pass);
    out.println("was-db Connection Success!");
}catch(Exception e) {
    // Failure is logged to the container's stdout/stderr for debugging.
    e.printStackTrace();
}finally{
    // Always release the connection so DB connections are not leaked.
    if(conn!=null){ try{ conn.close(); }catch(SQLException ignore){} }
}
%>
# Build and push the WAS image.
# NOTE(review): the exam says the cluster must pull from ECR, but these commands
# push to Docker Hub — confirm whether this should target an ECR repository.
docker login -u leeseohoo
docker build -t leeseohoo/was:1 .
docker push leeseohoo/was:1
- deployment 생성
# NOTE(review): the image pushed above is leeseohoo/was:1 but the deployment
# pulls leeseohoo/dbtest:1 — verify which image name is intended.
kubectl create deployment dbtest --image=leeseohoo/dbtest:1 --replicas=1
- svc 생성
# Expose the WAS pod on a NodePort so the internal ALB can target it.
kubectl expose deploy dbtest --port 8080 --target-port 8080 --type=NodePort
kubectl get svc
vi was-ingress.yml
# Internal ALB ingress for the WAS tier (reachable only inside the VPC).
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: "was-ingress"
  labels:
    app.kubernetes.io/name: "was-ingress"
  annotations:
    alb.ingress.kubernetes.io/scheme: internal
    alb.ingress.kubernetes.io/target-type: ip
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                # NOTE(review): the Service created earlier is named "dbtest",
                # not "was" — confirm the backend name matches the real Service.
                name: was
                port:
                  number: 8080
kubectl apply -f was-ingress.yml
web
vi /etc/docker/daemon.json
{
"insecure-registries": ["61.254.18.30:5000"]
}
sudo systemctl restart docker
vi Dockerfile
# NOTE(review): two FROM lines appear below; without COPY --from, only the last
# FROM produces the final image, so the first stage's COPYs are discarded.
# These look like two alternative Dockerfiles pasted together — keep one.
FROM 61.254.18.30:5000/ipnginx
# NOTE(review): these destinations are Apache httpd paths, but the base image
# name suggests nginx — confirm the actual web root inside the base image.
COPY ip/index.html /usr/local/apache2/htdocs/ip/index.html
COPY h/index.html /usr/local/apache2/htdocs/h/index.html
FROM oolralra/ipnginx
COPY nginx.conf /etc/nginx/nginx.conf
vi httpd.conf
ServerRoot "/usr/local/apache2"
Listen 80
# Load the core modules (event MPM, directory index, HTTP reverse proxy, rewrite).
LoadModule mpm_event_module modules/mod_mpm_event.so
LoadModule dir_module modules/mod_dir.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule rewrite_module modules/mod_rewrite.so
# DocumentRoot and directory access settings.
DocumentRoot "/usr/local/apache2/htdocs"
<Directory "/usr/local/apache2/htdocs">
Options Indexes FollowSymLinks
AllowOverride None
Require all granted
</Directory>
# Default index file served for directory requests.
DirectoryIndex index.html
# Log to the container's stdout/stderr (customize if needed).
ErrorLog /proc/self/fd/2
CustomLog /proc/self/fd/1 common
# Reverse-proxy settings: forward proxying disabled, preserve the Host header.
ProxyRequests Off
ProxyPreserveHost On
<Proxy *>
Require all granted
</Proxy>
# Proxy /exam to the internal ALB fronting the WAS tier.
# Note: "/exam" maps onto "/" at the backend, so the WAS serves its ROOT app.
ProxyPass "/exam" "http://internal-k8s-default-wasingre-867842459e-1503127101.ap-northeast-2.elb.amazonaws.com/"
ProxyPassReverse "/exam" "http://internal-k8s-default-wasingre-867842459e-1503127101.ap-northeast-2.elb.amazonaws.com/"
# Build, push and deploy the hostname page image.
vi h/index.html
docker build -t leeseohoo/hnginx:1 .
docker push leeseohoo/hnginx:1
kubectl create deployment hnginx --image=leeseohoo/hnginx:1 --replicas=1
kubectl expose deploy hnginx --port 80 --target-port 80 --type=NodePort
# Build, push and deploy the IP page image.
vi ip/index.html
docker build -t leeseohoo/ipnginx:1 .
docker push leeseohoo/ipnginx:1
kubectl create deployment ipnginx --image=leeseohoo/ipnginx:1 --replicas=1
kubectl expose deploy ipnginx --port 80 --target-port 80 --type=NodePort
# Build and push the front web (reverse proxy) image.
docker build -t leeseohoo/web:4 .
docker push leeseohoo/web:4
- deployment 생성
# NOTE(review): web:4 was built/pushed above but web:1 is deployed here —
# confirm the intended tag.
kubectl create deployment web --image=leeseohoo/web:1 --replicas=1
- svc 생성
kubectl expose deploy web --port 80 --target-port 80 --type=NodePort
# Shell into the running pods for debugging (pod names are from this run).
kubectl exec -it web-5cdb5d6868-8627m -- bash
kubectl exec -it dbtest-f7b6998f4-6x6l7 -- bash
vi web-ingress.yml
# Internet-facing ALB ingress for the front web tier.
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: "web-ingress"
  labels:
    app.kubernetes.io/name: "web-ingress"
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: web
                port:
                  number: 80
kubectl apply -f web-ingress.yml
풀이)
2 Tier
앞에 프론트 없이 백엔드만 배포
eksctl create cluster --vpc-private-subnets subnet-033d8f789eee8880d,subnet-0ca72dc02ff537a25 --name pric --region ap-northeast-2 --version 1.32 --nodegroup-name mycng --node-type t3.small --nodes 1 --nodes-min 1 --nodes-max 3 --node-private-networking
- kubeconfig 복사
root@aws-cli:~# aws eks update-kubeconfig --name pric
1. Route53 을 통해 내 도메인을 가비아에 등록
2. ingress 구성
root@aws-cli:~/3tier# vi ingress.yml
# Single internet-facing ALB routing the three exam paths:
#   /   -> tomcat (DB check), /ip -> IP page, /h -> hostname page.
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: "3app-ingress"
  labels:
    app.kubernetes.io/name: "nginx-ingress"
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: svc-tom
                port:
                  number: 80
          - pathType: Prefix
            path: /ip
            backend:
              service:
                name: svc-ip
                port:
                  number: 80
          - pathType: Prefix
            path: /h
            backend:
              service:
                name: svc-h
                port:
                  number: 80
root@aws-cli:~/3tier# vi h.yml
# Hostname page: Service + Deployment (image oolralra/hnginx, per exam rules).
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: v1
kind: Service
metadata:
  name: svc-h
spec:
  selector:
    app: myh
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-myh
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myh
  template:
    metadata:
      labels:
        app: myh
    spec:
      containers:
        - name: my-conh
          image: oolralra/hnginx
root@aws-cli:~/3tier# vi ip.yml
# IP page: Service + Deployment (image oolralra/ipnginx, per exam rules).
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: v1
kind: Service
metadata:
  name: svc-ip
spec:
  selector:
    app: myip
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-myip
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myip
  template:
    metadata:
      labels:
        app: myip
    spec:
      containers:
        - name: my-ip
          image: oolralra/ipnginx
root@aws-cli:~/3tier# vi tomcat.yml
# Tomcat WAS: Service (80 -> container 8080) + Deployment.
# Fixed: manifest had been collapsed onto one line with a stray trailing "|",
# which is not valid YAML; restored proper structure.
apiVersion: v1
kind: Service
metadata:
  name: svc-tom
spec:
  selector:
    app: mytom
  ports:
    - port: 80
      targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dep-mytom
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mytom
  template:
    metadata:
      labels:
        app: mytom
    spec:
      containers:
        - name: my-tom
          # Image pre-built with the DB-check index.jsp and the JDBC library.
          image: oolralra/tomjdbc:2
# tomjdbc:2 = db연동을 확인하는 index.jsp 파일과 jdbc라이브러리를 넣은 이미지
root@aws-cli:~/3tier# kubectl apply -f .
# 엔드포인트가 문제 = pod가 정상동작 안하는거(없거나 문제가 있거나, running중인데도 그런거면 헬스체크에 문제가 있음 → 로드밸런서의 타겟그룹의 상태검사를 가서 본다, 보안그룹도 문제일 수 있다.)
nginx 파드에서 리버스 프록시를 default.conf로 구성하기위해 예전에 썼었던 nginx-configmap을 재활용
git clone https://github.com/pcmin929/k8s-3tier
# 필요한 파일 = web-ingress.yml , configmap , svc-deployment
root@aws-cli:~/exam# vi web-ingress.yml
root@aws-cli:~/exam# kubectl apply -f web-ingress.yml
root@aws-cli:~/exam# vi nginx-configmap.yml
# default.conf 파일에 리버스프록시 주소를 넣자
root@aws-cli:~/exam# kubectl apply -f nginx-configmap.yml
root@aws-cli:~/exam# vi nginx.yml
root@aws-cli:~/exam# kubectl apply -f nginx.yml
# 리버스 프록시 구성에 문제? 혹은 파드에 문제? 백엔드는 정상인걸 확인
- 헬스체크 로그만 있음
kubectl logs nginx-dep-bc9d56c49-2dbzx
# pod 내부에서 curl이 안됨. = web-alb의 보안그룹과도 무관
# 그렇다면 pod는 어디에 존재하는가? = 클러스터의 노드그룹의 노드들(EC2인스턴스)에 존재
# 노드들의 보안그룹을 확인해보자
- 80번 포트의 outbound 모두 허용
최종적으로 도메인에 레코드를 만들어서 web-alb로 연결시켜주면 된다.
물론 web-alb에서 각 svc-tom , svc-h , svc-ip 에 각각 리버스 프록시를 구성해줘도 상관없다.