kubernetes.io > Documentation > Concepts > Overview > Labels and Selectors
kubectl run nginx1 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx2 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx3 --image=nginx --restart=Never --labels=app=v1
# or
for i in $(seq 1 3); do kubectl run nginx$i --image=nginx -l app=v1; done
kubectl get po --show-labels
kubectl label po nginx2 app=v2 --overwrite
kubectl get po -L app
# or
kubectl get po --label-columns=app
kubectl get po -l app=v2
# or
kubectl get po -l 'app in (v2)'
# or
kubectl get po --selector=app=v2
kubectl label po nginx1 nginx2 nginx3 app-
# or
kubectl label po nginx{1..3} app-
# or
kubectl label po -l app app-
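To double-check that the label was removed:
kubectl get po --show-labels # 'app' should no longer be listed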
Add the label to a node:
kubectl label nodes <your-node-name> accelerator=nvidia-tesla-p100
kubectl get nodes --show-labels
We can use the 'nodeSelector' property on the Pod YAML:
apiVersion: v1
kind: Pod
metadata:
  name: cuda-test
spec:
  containers:
  - name: cuda-test
    image: "k8s.gcr.io/cuda-vector-add:v0.1"
  nodeSelector: # add this
    accelerator: nvidia-tesla-p100 # the selection label
You can easily find out where the field belongs in the YAML by running:
kubectl explain po.spec
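For example, to inspect the exact field:
kubectl explain po.spec.nodeSelector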
OR:
Use node affinity (https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/#schedule-a-pod-using-required-node-affinity)
apiVersion: v1
kind: Pod
metadata:
  name: affinity-pod
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: accelerator
            operator: In
            values:
            - nvidia-tesla-p100
  containers:
  ...
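Either way, once the Pod is created you can confirm where it was scheduled (the NODE column should show the labelled node):
kubectl get po cuda-test -o wide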
kubectl annotate po nginx1 nginx2 nginx3 description='my description'
# or
kubectl annotate po nginx{1..3} description='my description'
kubectl describe po nginx1 | grep -i 'annotations'
# or
kubectl get po nginx1 -o custom-columns=NAME:metadata.name,ANNOTATIONS:metadata.annotations.description
As an alternative to using | grep, you can use jsonPath:
kubectl get po nginx1 -o jsonpath='{.metadata.annotations}{"\n"}'
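Or target the single annotation directly:
kubectl get po nginx1 -o jsonpath='{.metadata.annotations.description}{"\n"}'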
kubectl annotate po nginx{1..3} description-
kubectl delete po nginx{1..3}
kubernetes.io > Documentation > Concepts > Workloads > Controllers > Deployments
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml > deploy.yaml
vi deploy.yaml
# change the replicas field from 1 to 2
# add this section to the container spec and save the deploy.yaml file
# ports:
# - containerPort: 80
kubectl apply -f deploy.yaml
or, do something like:
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml | sed 's/replicas: 1/replicas: 2/g' | sed 's/image: nginx:1.18.0/image: nginx:1.18.0\n ports:\n - containerPort: 80/g' | kubectl apply -f -
or,
kubectl create deploy nginx --image=nginx:1.18.0 --replicas=2 --port=80
kubectl get deploy nginx -o yaml
kubectl describe deploy nginx # you'll see the name of the replica set on the Events section and in the 'NewReplicaSet' property
# OR you can find rs directly by:
kubectl get rs -l run=nginx # if you created the deployment with 'kubectl run'
kubectl get rs -l app=nginx # if you created the deployment with 'kubectl create'
# you could also just do kubectl get rs
kubectl get rs nginx-7bf7478b77 -o yaml
kubectl get po # get all the pods
# OR you can find pods directly by:
kubectl get po -l run=nginx # if you created the deployment with 'kubectl run'
kubectl get po -l app=nginx # if you created the deployment with 'kubectl create'
kubectl get po nginx-7bf7478b77-gjzp8 -o yaml
kubectl rollout status deploy nginx
kubectl set image deploy nginx nginx=nginx:1.19.8
# alternatively...
kubectl edit deploy nginx # change the .spec.template.spec.containers[0].image
The syntax of the 'kubectl set image' command is kubectl set image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N [options]
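So if the Deployment had a second container named, say, 'sidecar' (hypothetical here), both images could be updated in one command:
kubectl set image deploy nginx nginx=nginx:1.19.8 sidecar=busybox:1.36 # 'sidecar' and its image are hypothetical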
kubectl rollout history deploy nginx
kubectl get deploy nginx
kubectl get rs # check that a new replica set has been created
kubectl get po
kubectl rollout undo deploy nginx
# wait a bit
kubectl get po # select one 'Running' Pod
kubectl describe po nginx-5ff4457d65-nslcl | grep -i image # should be nginx:1.18.0
kubectl set image deploy nginx nginx=nginx:1.91
# or
kubectl edit deploy nginx
# change the image to nginx:1.91
# vim tip: type (without quotes) '/image' and Enter, to navigate quickly
kubectl rollout status deploy nginx
# or
kubectl get po # you'll see 'ErrImagePull' or 'ImagePullBackOff'
kubectl rollout undo deploy nginx --to-revision=2
kubectl describe deploy nginx | grep Image:
kubectl rollout status deploy nginx # Everything should be OK
kubectl rollout history deploy nginx --revision=4 # You'll also see the wrong image displayed here
kubectl scale deploy nginx --replicas=5
kubectl get po
kubectl describe deploy nginx
kubectl autoscale deploy nginx --min=5 --max=10 --cpu-percent=80
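Check the resulting HorizontalPodAutoscaler:
kubectl get hpa nginx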
kubectl rollout pause deploy nginx
kubectl set image deploy nginx nginx=nginx:1.19.9
# or
kubectl edit deploy nginx
# change the image to nginx:1.19.9
kubectl rollout history deploy nginx # no new revision
kubectl rollout resume deploy nginx
kubectl rollout history deploy nginx
kubectl rollout history deploy nginx --revision=6 # insert the number of your latest revision
kubectl delete deploy nginx
kubectl delete hpa nginx
# or
kubectl delete deploy/nginx hpa/nginx
kubectl create job pi --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)'
kubectl get jobs -w # wait till 'COMPLETIONS' shows 1/1 (will take some time; the perl image is big)
kubectl get po # get the pod name
kubectl logs pi-**** # get the digits of pi
kubectl delete job pi
OR
kubectl get jobs -w # wait till 'COMPLETIONS' shows 1/1 (will take some time; the perl image is big)
kubectl logs job/pi
kubectl delete job pi
kubectl create job busybox --image=busybox -- /bin/sh -c 'echo hello;sleep 30;echo world'
kubectl get po # find the job pod
kubectl logs busybox-ptx58 -f # follow the logs
kubectl get jobs
kubectl describe jobs busybox
kubectl logs job/busybox
kubectl delete job busybox
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'while true; do echo hello; sleep 10;done' > job.yaml
vi job.yaml
Add job.spec.activeDeadlineSeconds=30
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  activeDeadlineSeconds: 30 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - while true; do echo hello; sleep 10;done
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
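Then create the job and watch the deadline terminate it (the container loops forever, so only the 30-second deadline stops it):
kubectl create -f job.yaml
kubectl get job busybox -w # it never completes; after ~30 seconds the job is marked failed (reason: DeadlineExceeded)
kubectl delete job busybox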
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 30;echo world' > job.yaml
vi job.yaml
Add job.spec.completions=5
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  completions: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
Verify that it has been completed:
kubectl get job busybox -w # will take about two and a half minutes (5 sequential completions x ~30s each)
kubectl delete jobs busybox
vi job.yaml
Add job.spec.parallelism=5
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  parallelism: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
kubectl get jobs
With parallelism: 5, all five pods run at once, so the job should finish shortly after 30 seconds (instead of ~150 seconds sequentially)
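You can watch the five pods running simultaneously:
kubectl get po -w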
kubectl delete job busybox
kubernetes.io > Documentation > Tasks > Run Jobs > Running Automated Tasks with a CronJob
kubectl create cronjob busybox --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster'
kubectl get cj
kubectl get jobs --watch
kubectl get po --show-labels # observe that the pods have a label that mentions their 'parent' job
kubectl logs busybox-1529745840-m867r
# Bear in mind that the CronJob creates a new Job (and Pod) for each scheduled run
kubectl delete cj busybox
kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml
vi time-limited-job.yaml
Add cronjob.spec.jobTemplate.spec.activeDeadlineSeconds=17
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: time-limited-job
spec:
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: time-limited-job
    spec:
      activeDeadlineSeconds: 17 # add this line
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: time-limited-job
            resources: {}
          restartPolicy: Never
  schedule: '* * * * *'
status: {}
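Create it and observe the behaviour (assuming the file saved above):
kubectl create -f time-limited-job.yaml
kubectl get cj
kubectl get jobs --watch # a new job is spawned each minute; activeDeadlineSeconds caps each run at 17 seconds
kubectl delete cj time-limited-job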