Kubernetes (k8s) Notes - Problems

kind - ErrImageNeverPull

See:

kind load docker-image hello-python:latest hello-python:latest
 
kubectl apply -f deployment.yaml  # --validate=false
# kubectl get pods -o wide
NAME                            READY   STATUS              RESTARTS   AGE     IP            NODE                 NOMINATED NODE   READINESS GATES
hello-python-67978d6b66-spc7d   0/1     ErrImageNeverPull   0          4h50m   10.244.0.21   kind-control-plane   <none>           <none>
hello-python-67978d6b66-vmv27   0/1     ErrImageNeverPull   0          4h50m   10.244.0.20   kind-control-plane   <none>           <none>

Solution

crictl images is the equivalent of docker images

Diagnosis:

root@vmdeb01:~# docker exec -ti kind-control-plane /bin/bash
root@kind-control-plane:/# crictl images    # list the images actually present in the kind node

# Back on the host: remove the failing deployment, rebuild with an explicit tag, load it into kind
kubectl delete deployment hello-python

docker build -f Dockerfile -t hello-python:v0.1 .
kind load docker-image hello-python:latest hello-python:v0.1
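
To confirm the image actually landed in the kind node before redeploying, a quick check (reusing the node container name from above):

docker exec -ti kind-control-plane crictl images | grep hello-python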

deployment.yaml

apiVersion: v1
kind: Service
metadata:
 name: hello-python-service
spec:
 selector:
   app: hello-python
 ports:
 - protocol: "TCP"
   port: 6000
   targetPort: 5000
 type: LoadBalancer
 
---
apiVersion: apps/v1
kind: Deployment
metadata:
 name: hello-python
spec:
 selector:
   matchLabels:
     app: hello-python
 replicas: 2
 template:
   metadata:
     labels:
       app: hello-python
   spec:
     containers:
     - name: hello-python
       #image: hello-python:latest
       image: hello-python:v0.1  # <--- Solution
       imagePullPolicy: Never    # <--- Solution
       ports:
       - containerPort: 5000
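
Once the manifest is fixed, a quick way to verify the rollout (a sketch, reusing the names above):

kubectl apply -f deployment.yaml
kubectl rollout status deployment/hello-python
kubectl get pods -l app=hello-python -o wide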

Problem: Status Error (CrashLoopBackOff)

See:

root@vmdeb01:~# kubectl get pods
NAME                            READY   STATUS             RESTARTS      AGE
hello-python-7954bd58df-7qhj6   0/1     CrashLoopBackOff   4 (22s ago)   117s
hello-python-7954bd58df-v4bmx   0/1     CrashLoopBackOff   4 (36s ago)   117s
  1. # kubectl logs hello-python-7954bd58df-7qhj6 -c <CONTAINER_NAME>
  2. kubectl logs hello-python-7954bd58df-7qhj6

python: can't open file '/app/main.py': [Errno 2] No such file or directory
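
Since the pods crash before kubectl exec can reach them, the image contents can be checked directly on the Docker host (a sketch, assuming the hello-python:v0.1 image built above):

docker run --rm --entrypoint ls hello-python:v0.1 -la /app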

kubectl get pods -l app=myapp-deployment

Problem: Kubeadm (2) - join connection refused

kubeadm join vmdeb02:6443 --token ujwgb5.we2fa5y7z1vtzsmd         --discovery-token-ca-cert-hash sha256:fdbc20cfef538613e872378e5a0e0305fd5de2caaa04db3d159633086eb30d7c
[preflight] Running pre-flight checks


error execution phase preflight: couldn't validate the identity of the API Server: Get "https://vmdeb02:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s": dial tcp 192.168.100.12:6443: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher

Port 6443 is not listening on the master.
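
A quick way to confirm this before retrying the join (sketch):

nc -zv vmdeb02 6443       # from the worker: fails with connection refused while the API server is down
ss -lntp | grep 6443      # on the master: should show kube-apiserver listening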

Solution

On the master:

kubeadm reset
kubeadm init --control-plane-endpoint=192.168.100.12:6443 --skip-phases=addon/kube-proxy
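
After the new init completes, the old token is no longer valid; a fresh join command can be generated on the master with:

kubeadm token create --print-join-command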

Network problem: pods stuck in ContainerCreating

See:

# kubectl get pods -n kube-system |egrep -v "Running"
NAME                              READY   STATUS              RESTARTS       AGE
coredns-76f75df574-4pqxw          0/1     ContainerCreating   0              38m
coredns-76f75df574-lfdvp          0/1     ContainerCreating   0              38m
weave-net-f9p5b                   0/2     CrashLoopBackOff    18 (46s ago)   33m
weave-net-qj9zd                   1/2     CrashLoopBackOff    18 (80s ago)   33m

root@vmdeb02:~# kubectl describe pod -n kube-system weave-net-f9p5b |tail |grep -v Normal
  Warning  BackOff         2m26s                  kubelet            Back-off restarting failed container weave in pod weave-net-f9p5b_kube-system(51e1d7d8-fe7f-4394-9b53-212ac3dbb865)
  Warning  Unhealthy       2m10s (x7 over 2m56s)  kubelet            Readiness probe failed: Get "http://127.0.0.1:6784/status": dial tcp 127.0.0.1:6784: connect: connection refused
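
To dig further, the next step is usually to read the logs of the weave container itself (in the weave-net DaemonSet the main container is normally named weave):

kubectl -n kube-system logs weave-net-f9p5b -c weave
kubectl -n kube-system logs weave-net-f9p5b -c weave --previous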

Forbidden

"Error from server (Forbidden): error when creating "https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml": deployments.apps "dashboard-metrics-scraper" i
s forbidden: unable to create new content in namespace kubernetes-dashboard because it is being terminated"

Solution

kubectl -n kubernetes-dashboard delete pod,svc --all
kubectl -n kubernetes-dashboard delete pod,svc --all --force --grace-period 0
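
To check whether the namespace is still stuck in Terminating (sketch):

kubectl get namespace kubernetes-dashboard -o jsonpath='{.status.phase}'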

Problem: accessing the Kubernetes Dashboard - Error trying to reach service: 'dial tcp 10.244.2.4:8443: i/o timeout'

ssh -L8001:localhost:8001 kub1
sudo kubectl proxy

http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

Error trying to reach service: 'dial tcp 10.244.2.8:8443: i/o timeout'

Kubernetes-dashboard

kubectl --namespace=kubernetes-dashboard port-forward kubernetes-dashboard-b7ffbc8cb-2kwxp 8443
curl 127.0.0.1:8001/api

Solution

ssh -L8443:10.244.2.8:8443 kub3

https://127.0.0.1:8443/

Then we have the choice (token example below):

  • Please select the kubeconfig file that you have created to configure access to the cluster. To find out more about how to configure and use kubeconfig file, please refer to the Configure Access to Multiple Clusters section.
  • Every Service Account has a Secret with valid Bearer Token that can be used to log in to Dashboard. To find out more about how to configure and use Bearer Tokens, please refer to the Authentication section.
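
For the Bearer Token option, on a recent cluster (kubectl 1.24 or later) a token can be generated for an existing service account; the admin-user name below is only an example and must exist beforehand:

kubectl -n kubernetes-dashboard create token admin-user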

Problem: the EXTERNAL-IP stays in "pending"

$ kubectl get services nginx-web-svc
NAME            TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
nginx-web-svc   LoadBalancer   10.105.197.167   <pending>     80:32618/TCP   18h

Probably because nothing in the cluster implements LoadBalancer services (no cloud provider or MetalLB), so no external IP is ever assigned.

Solution: NodePort

kubectl edit services nginx-web-svc

Change ''type: LoadBalancer'' to ''type: NodePort''
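
The same change can also be made non-interactively (sketch):

kubectl patch svc nginx-web-svc -p '{"spec":{"type":"NodePort"}}'
kubectl get services nginx-web-svc   # now exposes a NodePort instead of waiting for an external IP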

See also type: ClusterIP

Problem: metrics-server - tls: failed to verify certificate

$ kubectl -n kube-system describe deploy metrics-server |grep ^Selector:
Selector:               k8s-app=metrics-server

$ kubectl -n kube-system get pods -l k8s-app=metrics-server
NAME                              READY   STATUS    RESTARTS   AGE
metrics-server-587b667b55-wt67b   1/1     Running   0          11m
$ kubectl logs metrics-server-587b667b55-wt67b -n kube-system
I0924 21:15:49.105305       1 server.go:191] "Failed probe" probe="metric-storage-ready" err="no metrics to serve"
E0924 21:15:57.723402       1 scraper.go:149] "Failed to scrape node" err="Get \"https://192.168.100.21:10250/metrics/resource\": tls: failed to verify certificate: x509: cannot validate certificate for 192.168.100.21 because it doesn't contain any IP SANs" node="vmdeb01.local"
E0924 21:15:57.726365       1 scraper.go:149] "Failed to scrape node" err="Get \"https://192.168.100.22:10250/metrics/resource\": tls: failed to verify certificate: x509: cannot validate certificate for 192.168.100.22 because it doesn't contain any IP SANs" node="vmdeb02

Solution

kubectl patch deployment metrics-server -n kube-system --type 'json' -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'

or

kubectl edit deploy metrics-server -n kube-system
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls            # <-- Add this line
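
Once the deployment rolls out with the new flag, metrics should start flowing; a quick check (sketch):

kubectl -n kube-system rollout status deploy/metrics-server
kubectl top nodes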

http://www.mtitek.com/tutorials/kubernetes/install-kubernetes-metrics-server.php

Other

--kubelet-preferred-address-types=InternalIP
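
To scrape kubelets only via their internal IP, the existing --kubelet-preferred-address-types argument can be replaced rather than appended; a sketch with a JSON patch, assuming the argument order shown in the deployment above (index 2):

kubectl patch deployment metrics-server -n kube-system --type 'json' -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/args/2", "value": "--kubelet-preferred-address-types=InternalIP"}]'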
