nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.27-alpine
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: "100m"
            memory: "64Mi"
          limits:
            cpu: "500m"
            memory: "64Mi"
        readinessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 3
          periodSeconds: 5
kubectl apply -f nginx-deployment.yaml
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get all
kubectl get deployments
kubectl get pods
kubectl get svc
kubectl get nodes
kubectl get nodes -o wide
kubectl get pods -n kube-system
kubectl describe deployment nginx
kubectl replace --force -f nginx-deployment.yaml
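To confirm the replacement rolled out cleanly:
kubectl rollout status deployment/nginx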
run (deployment)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
expose (service)
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    run: nginx
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    nodePort: 3xxxx # a random port is assigned on creation
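Once the Service exists, the assigned port can be read back; a quick check, assuming the Service name from the manifest above:
kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}'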
create deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
create service
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 3xxxx # assigned automatically
pod edit
kubectl edit pod NAME
Changes take effect immediately (only a few Pod fields, such as the container image, can be edited on a running Pod).
sleep
kubectl run web-pod --image=busybox --dry-run=client -o yaml \
--command -- sleep 3200 > busybox.yml
- https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
kubectl get pods
kubectl describe pods web-pod
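The linked page covers securityContext fields such as runAsUser; a minimal sketch of what might be added to the generated busybox.yml (the user ID is illustrative, not from the original task):
apiVersion: v1
kind: Pod
metadata:
  name: web-pod
spec:
  securityContext:
    runAsUser: 1000   # illustrative value; the task defines the real one
  containers:
  - name: web-pod
    image: busybox
    command: ["sleep", "3200"]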
rolling update
kubectl create deployment myproject \
--image=nginx:1.16 --dry-run=client -o yaml > nginx116.yml
kubectl get deployment
kubectl describe deployments myproject
kubectl set image deployment/myproject nginx=nginx:1.17 --record
kubectl rollout history deployment
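If the new image misbehaves, the rollout can be reverted; the revision number comes from the history above:
kubectl rollout undo deployment/myproject
kubectl rollout undo deployment/myproject --to-revision=1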
replication
kubectl create deployment my-deployment \
--image=nginx --replicas=3 --dry-run=client \
-o yaml > replication
kubectl run web-nginx --image=image:1.17 --labels tier=web-app
static pod
kubectl get nodes
ssh ...
ps aux | grep kubelet
cat /etc/kubernetes/kubelet-config.yaml | grep static
~~~
cd /etc/kubernetes/manifests
vi static-pod.yml
apiVersion: v1
kind: Pod
metadata:
  name: static-pod
spec:
  containers:
  - name: nginx-container
    image: nginx
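Once the kubelet picks up the manifest, the mirror Pod appears with the node name appended, e.g. static-pod-node01 (node name illustrative):
kubectl get pods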
multiple pods
kubectl run pod-multi --image=nginx -o yaml \
--dry-run=client > pod-multi.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod-multi
  name: pod-multi
spec:
  containers:
  - image: nginx
    name: container1
  - name: container2
    image: busybox
    command: ['sh', '-c', 'sleep 3600']
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
kubectl get pods
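To inspect the two containers individually:
kubectl get pod pod-multi -o jsonpath='{.spec.containers[*].name}'
kubectl logs pod-multi -c container2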
namespace
kubectl create namespace custom
kubectl get namespaces
kubectl run test-pod --image=nginx:1.17 -n custom \
-l env=test --dry-run=client -o yaml > test-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    env: test
    tier: backend
  name: test-pod
  namespace: custom
spec:
  containers:
  - image: nginx:1.17
    name: test-pod
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
kubectl apply -f test-pod.yaml
kubectl get pods
kubectl get pods -A
config print
kubectl get nodes
kubectl get node oc-prv-k8s-sprag-1 -o json > node-info.json
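When only a single field is needed, jsonpath avoids dumping the whole object:
kubectl get node oc-prv-k8s-sprag-1 -o jsonpath='{.status.nodeInfo.kubeletVersion}'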
scale out
kubectl scale --replicas=5 rs/new-replica-set
deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-frontend
  labels:
    app: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4-alpine
        ports:
        - containerPort: 80
kubectl apply -f ~~~
kubectl create deployment webapp \
--image=kodekloud/webapp-color --replicas=3
service
kubectl run httpd --image=httpd:alpine --port=80 --expose
run: creates a Pod (a Deployment in older kubectl versions)
--expose: additionally creates a matching Service
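Quick check that the one command produced both objects:
kubectl get pod,svc httpd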
cluster upgrade
Control plane upgrade
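kubeadm itself is upgraded first (the version placeholder follows the pattern used below):
apt-mark unhold kubeadm
apt-get update && apt-get install -y kubeadm=1.xx.x-00
apt-mark hold kubeadm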
kubeadm upgrade plan
kubeadm upgrade apply v1.xx.x
kubelet / kubectl upgrade
apt-mark unhold kubelet kubectl
apt-get install -y kubelet=1.xx.x-00 kubectl=1.xx.x-00
apt-mark hold kubelet kubectl
systemctl restart kubelet
Worker node upgrade
kubectl drain worker1 --ignore-daemonsets
apt-mark unhold kubeadm kubelet kubectl
apt-get install -y kubelet=1.28.3-00 kubeadm=1.28.3-00 kubectl=1.28.3-00
apt-mark hold kubeadm kubelet kubectl
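The official flow also runs kubeadm upgrade node on the worker at this point, to refresh the local kubelet configuration:
kubeadm upgrade node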
systemctl restart kubelet
kubectl uncordon worker1
ETCD Backup/Restore
backup
ETCDCTL_API=3 etcdctl \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
snapshot save /tmp/etcd-backup.db
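To sanity-check the snapshot (newer etcd releases prefer etcdutl for this, but etcdctl still accepts it):
ETCDCTL_API=3 etcdctl snapshot status /tmp/etcd-backup.db --write-out=table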
restore
ETCDCTL_API=3 etcdctl \
snapshot restore /tmp/etcd-backup.db \
--data-dir=/var/lib/etcd-restore
Then point the etcd static Pod at the restored directory in /etc/kubernetes/manifests/etcd.yaml:
command:
- etcd
- --data-dir=/var/lib/etcd-restore
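The hostPath volume in the same etcd.yaml has to point at the restored directory too, or etcd will start against the old data (the volume name below matches the default kubeadm manifest):
volumes:
- hostPath:
    path: /var/lib/etcd-restore
    type: DirectoryOrCreate
  name: etcd-data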
User Authentication / Authorization
# Generate a private key
openssl genrsa -out user.key 2048
# Create a certificate signing request (CSR)
openssl req -new -key user.key -out user.csr -subj "/CN=dev-user/O=dev-group"
# Sign the certificate with the cluster CA
openssl x509 -req -in user.csr \
  -CA /etc/kubernetes/pki/ca.crt \
  -CAkey /etc/kubernetes/pki/ca.key \
  -CAcreateserial \
  -out user.crt \
  -days 365
# Add the user to kubeconfig
kubectl config set-credentials dev-user --client-certificate=user.crt --client-key=user.key
# Apply the user to a context
kubectl config set-context dev-context --cluster=kubernetes --user=dev-user
# Switch contexts
kubectl config use-context dev-context
Example: grant dev-user read access to Pods in the default namespace
# role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list"]
# binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods-binding
  namespace: default
subjects:
- kind: User
  name: dev-user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f role.yaml
kubectl apply -f binding.yaml
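To verify the binding from the admin context, --as impersonates the user:
kubectl auth can-i list pods --as dev-user -n default    # yes
kubectl auth can-i delete pods --as dev-user -n default  # no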
Context
# List contexts
kubectl config get-contexts
# Show the current context
kubectl config current-context
# Switch contexts
kubectl config use-context dev-context
# Add a context
kubectl config set-context dev-context --cluster=kubernetes --user=dev-user --namespace=default
instant command
kubectl run ping-job --image=busybox \
--restart=Never \
--command -- ping -c 4 google.com && kubectl delete pod ping-job
taint
kubectl taint nodes NODE_NAME KEY=VALUE:EFFECT
EFFECT:
- NoSchedule: Pods without a matching toleration are not scheduled onto the node
- PreferNoSchedule: the scheduler tries to avoid the node but may still use it
- NoExecute: Pods without a toleration are not scheduled, and existing Pods are evicted
Example: add a taint
kubectl taint nodes node01 dedicated=frontend:NoSchedule
Example: create a Pod with a toleration
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  tolerations:
  - key: "dedicated"
    operator: "Equal"
    value: "frontend"
    effect: "NoSchedule"
  containers:
  - name: nginx
    image: nginx
Example: remove a taint (note the trailing -)
kubectl taint nodes node01 dedicated=frontend:NoSchedule-
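To confirm the taint was added or removed:
kubectl describe node node01 | grep -i taints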
toleration
A toleration lets a Pod be scheduled onto a node that carries a matching taint.
- On the exam, a node is often given with a taint already applied, and the task is to schedule a Pod onto that node
- In that case the tolerations field must be written exactly
- A question that sets tolerationSeconds and tests NoExecute is also plausible (see the sketch below)
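A minimal sketch of such a toleration, reusing the key/value from the example above (300 is an arbitrary number):
tolerations:
- key: "dedicated"
  operator: "Equal"
  value: "frontend"
  effect: "NoExecute"
  tolerationSeconds: 300   # the Pod is evicted 300s after the taint appears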
NetworkPolicy
- NetworkPolicy is the resource that controls traffic between Pods
- By default, Kubernetes allows all Pod-to-Pod traffic
- Once a NetworkPolicy applies, only the traffic it specifies is allowed; everything else is blocked
Basic structure
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-nginx
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: nginx
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          access: granted
Required fields:
- podSelector: selects the Pods this policy applies to
- policyTypes: Ingress, Egress, or both
- ingress: controls inbound traffic
- egress: controls outbound traffic
- from / to: defined with podSelector, namespaceSelector, ipBlock, etc.
Example: block all Pod-to-Pod traffic, allowing only app=frontend to reach app=backend
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
Example: deny all traffic (both Ingress and Egress)
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all
  namespace: default
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
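A quick probe to see the policy working; the Pod IP is a placeholder, and with deny-all in place the request should time out:
kubectl run np-test --image=busybox:1.28 --rm -it --restart=Never \
-- wget -qO- -T 2 http://<pod-ip>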
DNS
kubectl run dns-resolver --image=nginx
kubectl expose pod dns-resolver \
--name=dns-resolver-service \
--port=80 \
--target-port=80 \
--type=ClusterIP
Verification (interactive shell)
kubectl run test-nslookup \
--image=busybox:1.28 --rm -it --restart=Never \
-- nslookup dns-resolver-service
kubectl run test-nslookup \
--image=busybox:1.28 --rm -it --restart=Never \
-- nslookup dns-resolver-service \
> /root/nginx.svc
Verification (one-shot run)
kubectl run ping-job \
--image=busybox:1.28 \
--restart=Never \
--command \
-- nslookup dns-resolver-service \
&& kubectl delete pod ping-job
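The Service also resolves by its full name (default namespace assumed from the commands above):
kubectl run test-fqdn \
--image=busybox:1.28 --rm -it --restart=Never \
-- nslookup dns-resolver-service.default.svc.cluster.local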
T/S (troubleshooting)
kubectl get pods
How to diagnose and fix a Pod stuck in Pending or a similar state:
kubectl describe pod appychip
Check the Events section.
Taint-related:
kubectl get nodes
kubectl describe nodes node01 | grep -i taint
kubectl get pod appychip -o yaml > appychip.yml
Look at the Pod spec and set key / operator / value / effect under tolerations.
kubectl apply -f appychip.yml
Replica-related:
kubectl get pods
Check the replica count.
Networking-related:
Expect 1-2 questions; they carry some difficulty, so save them for later.
https://kubernetes.io/docs/concepts/services-networking/network-policies
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - ipBlock:
        cidr: 172.17.0.0/16
        except:
        - 172.17.1.0/24
    - namespaceSelector:
        matchLabels:
          project: myproject
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 6379
  egress:
  - to:
    - ipBlock:
        cidr: 10.0.0.0/24
    ports:
    - protocol: TCP
      port: 5978
kubectl apply -f network.yml
kubectl get networkpolicy test-network-policy
kubectl describe networkpolicy test-network-policy
Context output:
kubectl config get-contexts -o name > contexts
kubectl config current-context
cat ~/.kube/config | grep current
Pod output:
kubectl get pod -A --sort-by=.metadata.creationTimestamp
kubectl get pod -A --sort-by=.metadata.uid
Control plane:
kubectl get nodes
ssh controlplane
find /etc/systemd/system | grep kube
find /etc/kubernetes/manifests/
kubectl -n kube-system get pod -o wide | grep controlplane
kubectl -n kube-system get ds
kubectl -n kube-system get deploy
updated..
deployments
Two Deployments exist in separate namespaces, and a NetworkPolicy must be defined so the namespaces can interact.
- Out of several candidate YAMLs, which one is the most appropriate?
ConfigMap
nginx with TLS v1.3
kubectl get configmap
## test-configmap.yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  nginx.conf: |
    events {}
    http {
      server {
        listen 443 ssl;
        server_name example.com;
        ssl_certificate /etc/nginx/certs/tls.crt;
        ssl_certificate_key /etc/nginx/certs/tls.key;
        ssl_protocols TLSv1.2 TLSv1.3;
        location / {
          root /usr/share/nginx/html;
          index index.html;
        }
      }
    }
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
-keyout tls.key -out tls.crt -subj "/CN=example.com"
kubectl create secret tls nginx-tls --cert=tls.crt --key=tls.key
kubectl apply -f test-configmap.yml
kubectl create deployment nginx --image=nginx --replicas=1
## nginx-deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 443
        volumeMounts:
        - name: config
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
        - name: certs
          mountPath: /etc/nginx/certs
      volumes:
      - name: config
        configMap:
          name: nginx-config
      - name: certs
        secret:
          secretName: nginx-tls
kubectl apply -f nginx-deployment.yml
kubectl expose deployment nginx --port=443 --target-port=443 --type=NodePort
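A sketch for probing the TLS endpoint from inside the cluster; curlimages/curl is an assumption, not part of the exercise:
kubectl run tls-test --image=curlimages/curl --rm -it --restart=Never \
--command -- curl -vk --tlsv1.3 https://nginx.default.svc.cluster.local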
Exercise walkthrough
kubectl get cm
kubectl edit cm nginx-config
Scale the deployment down to 0 and back up (a subPath mount does not pick up ConfigMap changes, so the Pods must be recreated):
kubectl scale deployment nginx --replicas=0
kubectl get deployment nginx
kubectl scale deployment nginx --replicas=1
kubectl rollout restart deployment nginx
ingress
kubectl get namespace
kubectl get deployment
kubectl create ns sound-repeater
kubectl create deployment echoserver --image=nginx --replicas=1 -n sound-repeater
kubectl expose deployment echoserver \
--name=echoserver-service --port=80 \
--target-port=8080 --type=NodePort \
--namespace sound-repeater
kubectl get deployment -A
kubectl get svc -A
kubectl get ingressclass
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echoserver-service-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx-example
  rules:
  - host: www.example.org
    http:
      paths:
      - path: /echo
        pathType: Prefix
        backend:
          service:
            name: echoserver-service
            port:
              number: 80   # the Service port (80), not its targetPort (8080)
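To check the routing (the controller address is environment-specific):
kubectl get ingress echoserver-service-ingress
curl -H "Host: www.example.org" http://<ingress-controller-ip>/echo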