VMware OpenStack VIO notes

Problem: HTTP Error 503

Solution

Log in to each controller and run:

service apache2 restart
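
One way to do it on all controllers at once (a sketch; the controller names and the SSH user are assumptions to adapt to your deployment):

for ctl in controller1 controller2 controller3; do   # replace with your controller hostnames/IPs
  ssh root@"$ctl" 'service apache2 restart'
done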

Ansible

sudo mkdir -p /opt/vmware/vio/custom
sudo cp /var/lib/vio/ansible/custom/custom.yml.sample /opt/vmware/vio/custom/custom.yml
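# Then edit /opt/vmware/vio/custom/custom.yml and set, for example: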
# The maximum number of entities that will be returned in a collection, with no
# limit set by default.
#keystone_list_limit: 100
keystone_list_limit: 500
sudo viocli deployment configure
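
Once viocli deployment configure has finished, the value can be verified on a controller (a quick sketch; the path matches the keystone.conf shown further below):

grep -n list_limit /etc/keystone/keystone.conf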

Source: https://docs.vmware.com/en/VMware-Integrated-OpenStack/5.1/integrated-openstack-51-administration-guide.pdf

Roles: see policy.yaml (referenced under [oslo_policy] in keystone.conf below)


VMware OpenStack VIO notes - Configuration

Configuration files:

/etc/keystone/keystone.conf

[DEFAULT]                          
public_endpoint = https://192.168.51.61:5000/
admin_endpoint = https://192.168.51.61:35357/
member_role_name = _member_
list_limit = 500                                     
insecure_debug = False                               
debug = True                                
log_file = keystone.log
log_dir = /var/log/keystone
use_syslog = true
syslog_log_facility = LOG_LOCAL7      
default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO,dogpile.lock=INFO
 
[auth]
methods = password,token,saml2,openid,mapped
 
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = 192.168.51.65:11211,192.168.51.66:11211
 
[database]
connection = CHANGEME
 
[federation]
trusted_dashboard = https://192.168.21.53/auth/websso/
trusted_dashboard = https://192.168.51.61/auth/websso/
 
[fernet_tokens]
max_active_keys = 2
 
[identity]
domain_specific_drivers_enabled = true
domain_configurations_from_database = False
 
[oslo_policy]
policy_file = /etc/keystone/policy.yaml
 
[resource]
admin_project_domain_name = Default
admin_project_name = admin
 
[saml2]
remote_id_attribute = Shib-Identity-Provider
 
[token]
expiration = 7200

/etc/keystone/domains/keystone.acme.conf

[identity]
domain_configurations_from_database = False
driver = ldap
list_limit = 500
 
[ldap]
query_scope = sub
group_name_attribute = sAMAccountName
group_objectclass = group
user_mail_attribute = mail
user_enabled_attribute = userAccountControl
group_tree_dn = CN=Openstack,OU=Groupes,DC=acme,DC=local
chase_referrals = false
user_id_attribute = sAMAccountName
group_members_are_ids = true
group_member_attribute = memberUid
page_size = 100
use_tls = false
url = ldaps://ldap.acme.local:636
user_name_attribute = sAMAccountName
user = admin
user_objectclass = organizationalPerson
group_id_attribute = cn
user_filter = (memberOf=CN=Openstack,OU=Groupes,DC=acme,DC=local)
group_desc_attribute = description
user_tree_dn = DC=acme,DC=local
user_pass_attribute = userPassword
password = CHANGEME
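
To verify the LDAP-backed domain, list its users and groups from the OpenStack CLI (a sketch; the domain name acme is assumed from the file name keystone.acme.conf):

openstack user list --domain acme
openstack group list --domain acme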

/etc/nova/nova.conf

[DEFAULT]                                                                                                                                                                                     
log_dir = /var/log/nova                       
lock_path = /var/lock/nova                                                                                                 
state_path = /var/lib/nova    
 
[api_database]                             
connection = sqlite:////var/lib/nova/nova_api.sqlite                                                                                                            
 
[cells]
enable = False
 
[database]
connection = sqlite:////var/lib/nova/nova.sqlite
 
[placement]
os_region_name = openstack

/etc/nova/nova-compute.conf

[DEFAULT]                                                              
compute_driver = vmwareapi.VMwareVCDriver      
allow_resize_to_same_host = true              
remove_unused_original_minimum_age_seconds = 86400
cpu_allocation_ratio = 10                                                                                                                                                                                          
ram_allocation_ratio = 1.5                                                                                                                                                             
disk_allocation_ratio = 0.0                                                                                                                                                            
resume_guests_state_on_host_boot = true
max_concurrent_builds = 20
block_device_allocate_retries = 1800         
heal_instance_info_cache_interval = 120                      
block_device_allocate_retries_interval = 2               
force_config_drive = False
dhcpbridge_flagfile = /etc/nova/nova.conf                      
dhcpbridge = /usr/bin/nova-dhcpbridge
metadata_host = 192.168.51.61                         
dhcp_domain = novalocal                                                                                                                                                                
web = /usr/share/vmware-mks
state_path = /var/lib/nova
periodic_fuzzy_delay = 120
debug = True
verbose = True
log_dir = /var/log/nova
use_syslog = true
syslog_log_facility = LOG_LOCAL7
rpc_response_timeout = 120
sync_power_state_action = dbsync
use_hypervisor_stats = True
 
[api]
use_forwarded_for = true
compute_link_prefix = https://192.168.21.53:8774
glance_link_prefix = https://192.168.21.53:9292
 
[api_database]
connection = "CHANGEME"
max_pool_size = 50
max_overflow = 50
 
[cache]
enabled = false
 
[cinder]
endpoint_template = https://192.168.51.61:8776/v3/%(project_id)s
api_insecure = true
 
[conductor]
workers = 2
 
[database]
connection = "CHANGEME"
 
[filter_scheduler]
max_io_ops_per_host = 8
max_instances_per_host = 50
 
[glance]
api_servers = https://192.168.51.61:9292
 
[keystone_authtoken]
memcached_servers = 192.168.51.65:11211,192.168.51.66:11211
auth_type = v3password
auth_url = https://192.168.51.61:35357/v3
project_name = service
username = nova
password = CHANGEME
project_domain_name = local
user_domain_name = local
 
[mks]
mksproxy_base_url = https://192.168.21.53:6090/vnc_auto.html
enabled = true
 
[neutron]
url = https://192.168.51.61:9696
service_metadata_proxy = true
metadata_proxy_shared_secret = CHANGEME
auth_type = v3password
auth_url = https://192.168.51.61:35357/v3
project_name = service
project_domain_name = local
username = neutron
user_domain_name = local
password = CHANGEME
 
[oslo_concurrency]
lock_path = /var/lock/nova
 
[oslo_messaging_rabbit]
rabbit_hosts = 192.168.51.62,192.168.51.63,192.168.51.64
rabbit_userid = test
rabbit_password = CHANGEME
rabbit_ha_queues = true
 
[oslo_messaging_zmq]
rpc_thread_pool_size = 100
 
[pci]
passthrough_whitelist = [{"vendor_id": "*", "product_id": "*"}]
 
[placement]
os_region_name = nova
os_interface = internal
auth_type = v3password                                                                                                                                                                                             
auth_url = https://192.168.51.61:35357/v3
project_name = service
project_domain_name = local
username = placement
user_domain_name = local
password = CHANGEME
 
[vmware]
serial_port_service_uri = s1cb9is4rC66cr000791
serial_port_proxy_uri = telnets://192.168.51.71:13370#thumbprint=A9:CF:EC:E6:DD:00:6A:90:C4:F7:4B:83:11:C9:70:42:13:A9:08:36
serial_log_dir = /var/log/vspc
host_ip = 192.168.51.160
host_username = Administrator@vsphere.local
host_password = CHANGEME
insecure = True
cluster_name = Production
datastore_regex = production
vnc_port_total = 6500
use_linked_clone = True
cache_prefix = VIO_9a9c86dc379144d7a4f43919d9066315_b78814fd_domain-c34
store_image_dir = /images
snapshot_format = template
import_vm_enabled = True
import_vm_relocate = True
tenant_vdc = False
passthrough = False
 
[vnc]
enabled = False
vncserver_proxyclient_address = 192.168.51.160
novncproxy_base_url = https://192.168.21.53:6080/vnc_auto.html
 
[wsgi]
api_paste_config = /etc/nova/api-paste.ini
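
A quick sanity check from the OpenStack CLI that the VMware compute driver registered correctly (a sketch; requires admin credentials):

openstack compute service list
openstack hypervisor list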

/etc/glance/policy.json

{
    "context_is_admin":  "role:admin",
    "default": "role:admin",
 
    "add_image": "",
    "delete_image": "",
    "get_image": "",
    "get_images": "",
    "modify_image": "",
    "publicize_image": "role:admin",
    "communitize_image": "",
    "copy_from": "",
 
    "download_image": "",
    "upload_image": "",
 
    "delete_image_location": "",
    "get_image_location": "",
    "set_image_location": "",
 
    "add_member": "",
    "delete_member": "",
    "get_member": "",
    "get_members": "",
    "modify_member": "",
 
    "manage_image_cache": "role:admin",
 
    "get_task": "",
    "get_tasks": "",
    "add_task": "",
    "modify_task": "",
 
    "deactivate": "",
    "reactivate": "",
 
    "get_metadef_namespace": "",
    "get_metadef_namespaces":"",
    "modify_metadef_namespace":"",
    "add_metadef_namespace":"",
 
    "get_metadef_object":"",
    "get_metadef_objects":"",
    "modify_metadef_object":"",
    "add_metadef_object":"",
 
    "list_metadef_resource_types":"",
    "get_metadef_resource_type":"",
    "add_metadef_resource_type_association":"",
 
    "get_metadef_property":"",
    "get_metadef_properties":"",
    "modify_metadef_property":"",
    "add_metadef_property":"",
 
    "get_metadef_tag":"",
    "get_metadef_tags":"",
    "modify_metadef_tag":"",
    "add_metadef_tag":"",
    "add_metadef_tags":"" 
}
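
A modified policy file can be syntax-checked before use (a minimal sketch):

python3 -m json.tool /etc/glance/policy.json > /dev/null && echo "policy.json OK"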

French console: append /?locale=fr_FR to the dashboard URL.
See https://kb.vmware.com/s/article/1016403


Virtualization notes

Automated virtual environment management:

OpenVZ

OpenVZ Proxmox

LXC vs OpenVZ

How to tell whether you are in a VM

# SystemD
systemd-detect-virt
hostnamectl
 
virt-what
virtdetect
 
grep -q '^flags.* hypervisor' /proc/cpuinfo
jean@vps1:~$ systemd-detect-virt --vm
vmware
jean@vps1:~$ systemd-detect-virt
openvz

Am I inside a Docker container?

grep 'systemd:/system.slice/docker-' /proc/self/cgroup
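
Another quick check: Docker normally creates an empty /.dockerenv file at the container root:

[ -f /.dockerenv ] && echo "inside a Docker container"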

OpenVZ ?

cat /proc/vz/veinfo

Otherwise dmesg, lsmod or lspci usually give a good idea.


VirtualBox notes

Converting vmdk (VMware) to vdi (VirtualBox)

"c:\program files\oracle\virtualbox\vboxmanage" clonehd SWF73-V1-0-disk1.vmdk new.vdi --format VDI

If the disk is a clone, it keeps the same UUID; assign a new one:

rem "c:\program files\oracle\virtualbox\vboxmanage" sethduuid plop.vhd
"c:\program files\oracle\virtualbox\vboxmanage" internalcommands sethduuid plop.vhd

Defrag / Compact / Shrink

VBoxManage.exe modifymedium disk F:\VMs\vmdeb1\vmdeb1.vdi --compact
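
Note that --compact only reclaims blocks that are entirely zeroed, so it helps to zero the free space inside the guest first (a sketch for a Linux guest; /zerofile is just a temporary filler file):

# Fill free space with zeros (dd stops with "No space left on device", which is expected), then delete the file
dd if=/dev/zero of=/zerofile bs=1M
rm -f /zerofile
# In a Windows guest, "sdelete -z c:" does the equivalent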

Problems / Errors

VBOX_E_OBJECT_NOT_FOUND

Solution

"c:\program files\oracle\virtualbox\vboxmanage" internalcommands sethduuid F:\install\VM\plop-clone.vdi

VIO (VMware Integrated OpenStack) k8s notes

Useful Kubernetes commands for VIO

List all pods in all namespaces

kubectl get pods --all-namespaces

To view the logs of a pod, here neutron-dhcp-agent-default-2s4z9 in the openstack namespace:

kubectl logs --namespace=openstack neutron-dhcp-agent-default-2s4z9 --all-containers
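
If the container keeps restarting (CrashLoopBackOff), the logs of the previous instance are available with --previous:

kubectl logs --namespace=openstack neutron-dhcp-agent-default-2s4z9 --previous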

Open a shell in a pod

kubectl exec -it --namespace=openstack neutron-dhcp-agent-default-2s4z9 -- /bin/bash

Run a command in a pod without opening a shell on it:

kubectl exec  --namespace=openstack neutron-dhcp-agent-default-2s4z9 -- ls

Administration

Passwords / secrets

$ kubectl -n openstack get secrets managedpasswords -o yaml |grep sql_root_password
sql_root_password: ejdKNjk1anNqcXR0bDd2a2c3NDVnaHduMnhteDVtNno=
 
$ echo ejdKNjk1anNqcXR0bDd2a2c3NDVnaHduMnhteDVtNno= |base64 -D
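
The same value can be extracted and decoded in one line (note: GNU/Linux base64 decodes with -d; -D is the BSD/macOS flag used above):

kubectl -n openstack get secrets managedpasswords -o jsonpath='{.data.sql_root_password}' | base64 -d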
Alias
alias osapply='osctl apply'
alias osctl='kubectl -n openstack'
alias osctlw='osctl --watch'
alias osdel='osctl delete'
alias osedit='osctl edit'
alias osget='osctl get'
alias oslog='osctl logs'
alias pods='kubectl get pods --all-namespaces --watch'

Example commands

viocli get deployment
kubectl -n openstack get pods
kubectl -n openstack get pods -o wide
osctl get pods
osctl get nodes -o wide
kubectl get svc
 
osctl describe pod mariadb-server-1
 
journalctl -u docker.service -u kubelet.service --no-pager -f
 
helm list
helm list -a
 
oslog mariadb-server-1
 
ovfenv
timedatectl

Other

https://journaldunadminlinux.fr/tutoriel-installez-facilement-un-cluster-kubernetes-sous-debian-ou-centos/

kubeadm config images pull --v=5
#kubeadm reset
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.21
kubectl get daemonsets --all-namespaces
kubectl describe ds
kubectl describe ds --all-namespaces
kubectl get events
kubectl get events --all-namespaces
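# Safely remove a node: cordon it, drain its workloads, delete the node,
# then delete the corresponding VIO machine object (next four commands)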
kubectl cordon node-plop
kubectl drain node-plop --ignore-daemonsets --delete-local-data
kubectl delete node node-plop
kubectl -n openstack delete machine node-plop
viossh ()
{
    ssh -i /root/.ssh/vsphere_tmp vioadmin@$(kubectl get nodes -o jsonpath='{.status.addresses[?(@.type=="ExternalIP")].address}' $1)
}
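
Usage example (node name taken from the viocli get deployment output further down in these notes):

viossh controller-cwpxtjf97w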

Get a Shell to a Running Container

See https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/

To open a shell when a Pod has more than one container, use --container (or -c):

kubectl -n namespace exec -it my-pod --container main-app -- /bin/bash

List all containers of a pod

kubectl -n openstack describe pod/vioadmin1-vioshim-7b6dc9f947-297lg 
 
kubectl -n openstack get pods vioadmin1-vioshim-7b6dc9f947-297lg -o jsonpath='{.spec.containers[*].name}*'

OpenStack VIO k8s - Troubleshooting

Troubleshooting VIO 6.0

==============================
Troubleshooting progression:
------------------------------
a- services running on Photon OS:
		If you encounter a problem running an application or appliance on Photon OS and you suspect it involves the operating system, you can troubleshoot by proceeding as follows.
		1- Check the services running on Photon OS  : systemctl status or systemctl --failed
		2- Check jobs : osctl get job  
		3- Check the operating system log files in /var/log and /:
			journalctl  
			...
			Next, run the following commands to view all services according to the order in which they were started:
			systemd-analyze critical-chain
			Use the troubleshooting tool that you think is most likely to help with the issue at hand. 
			For example, use strace to identify the location of the failure.
			

b- Check that the VIO 6.0 deployment is ready:
		root@vio-mgt [ ~ ]# viocli get deployment
		PUBLIC VIP     PRIVATE VIP    HIGH AVAILABILITY
		172.18.21.53   172.18.51.89   Enabled
		NODE NAME                     ROLE         VALIDATION   STATUS    IP
		controller-cwpxtjf97w         Controller   Success      Running   172.18.51.62
		controller-h5dddpj668         Controller   Success      Running   172.18.51.61
		controller-l2c8fpsd8g         Controller   Success      Running   172.18.51.63
		vio-mgt.etudes.acme.local   Manager      Success      Running   172.18.51.60
		SERVICE        CONTROLLER                       READY   FAILURES
		barbican       barbican-api                      2/2       -
					   barbican-ks-listener              2/2       -
					   barbican-worker                   2/2       -
		cinder         cinder-api                        2/2       -
					   cinder-scheduler                  2/2       -
					   cinder-volume                     2/2       -
		glance         glance-api                        1/1       -
					   glance-vmw-replicator             1/1       -
		heat           heat-api                          2/2       -
					   heat-cfn                          2/2       -
					   heat-engine                       2/2       -
		horizon        horizon                           2/2       -
		ingress        ingress                           2/2       -
					   ingress-error-pages               1/1       -
		keystone       keystone-api                      2/2       -
		mariadb        mariadb-server                    3/3       -
					   mariadb-ingress                   2/2       -
					   mariadb-ingress-error-pages       2/2       -
					   mariadb1-etcd                     3/3       -
		memcached      memcached1-memcached              1/1       -
					   memcached1-memcached-secondary    1/1       -
		neutron        neutron-dhcp-agent-default        3/3       -
					   neutron-metadata-agent-default    3/3       -
					   neutron-server                    2/2       -
		nova           nova-api-metadata                 2/2       -
					   nova-api-osapi                    2/2       -
					   nova-conductor                    2/2       -
					   nova-consoleauth                  1/1       -
					   nova-mksproxy                     1/1       -
					   nova-placement-api                2/2       -
					   nova-scheduler                    2/2       -
		nova-compute   compute-b78814fd-c34-compute      1/1       -
		openvswitch    openvswitch-db                    3/3       -
					   openvswitch-vswitchd              3/3       -
		rabbitmq       rabbitmq1-rabbitmq                3/3       -
		vioshim        vioadmin1-vioshim                 1/1       -
		vioutils       node-config-manager               3/3       -
		OpenStack Deployment State: RUNNING
			
			
			
c- Check that the openstack-control-plane controllers are running on Photon OS and in Ready status:

		root@vio-mgt [ ~ ]# kubectl get nodes
		NAME                          STATUS   ROLES                     AGE     VERSION
		controller-cwpxtjf97w         Ready    openstack-control-plane   6d19h   v1.14.1
		controller-h5dddpj668         Ready    openstack-control-plane   6d19h   v1.14.1
		controller-l2c8fpsd8g         Ready    openstack-control-plane   6d19h   v1.14.1
		vio-mgt.etudes.acme.local   Ready    master                    6d19h   v1.14.1
		
		root@vio-mgt [ ~ ]# kubectl get deployments
		NAME                                             READY   UP-TO-DATE   AVAILABLE   AGE
		vio-docker-registry                              1/1     1            1           6d20h
		vio-helm-repo                                    1/1     1            1           6d20h
		vio-ingress-cntl-nginx-ingress-controller        1/1     1            1           6d20h
		vio-ingress-cntl-nginx-ingress-default-backend   1/1     1            1           6d20h
		vio-webui                                        1/1     1            1           6d20h
		
		root@vio-mgt [ ~ ]# kubectl get pods
			NAME                                                              READY   STATUS    RESTARTS   AGE
			vio-docker-registry-ca-2vz8c                                      	1/1     Running   0          6d19h
			vio-docker-registry-ca-5hqxb                                      	1/1     Running   0          6d19h
			vio-docker-registry-ca-c9msq                                      	1/1     Running   0          6d19h
			vio-docker-registry-ca-jl7t4                                      	1/1     Running   0          6d20h
			vio-docker-registry-ddf5c8fc6-k5c7h                               	1/1     Running   0          6d20h
			vio-helm-repo-647784c488-xmshc										1/1     Running   0          6d20h
			vio-ingress-cntl-nginx-ingress-controller-7969c994b-824wx			1/1     Running   0          6d20h
			vio-ingress-cntl-nginx-ingress-default-backend-84ff56ff69-87pmp   	1/1     Running   0          6d20h
			vio-webui-5fbd4b7589-9b5ns                                        	1/1     Running   0          6d20h
			vio-webui-auth-proxy-0												1/1     Running   0          6d20h
		
		For a given service, e.g. glance:
		root@vio-mgt [ ~ ]# kubectl -n openstack get pods |grep glance
			glance-api-79d574b8b8-5l6nh                                       2/2     Running     0          4h53m
			glance-bootstrap-ncln7                                            0/1     Completed   0          4h53m
			glance-db-init-x7cj9                                              0/1     Completed   0          4h53m
			glance-db-sync-tqdv7                                              0/1     Completed   0          4h53m
			glance-ks-endpoints-ljbx7                                         0/3     Completed   0          4h53m
			glance-ks-service-9lhxq                                           0/1     Completed   0          4h53m
			glance-ks-user-ljfm9                                              0/1     Completed   0          4h53m
			glance-metadefs-gfwtv                                             0/1     Completed   0          4h53m
			glance-rabbit-init-ttwcx                                          0/1     Completed   0          4h53m
			glance-storage-init-twtc2                                         0/1     Completed   0          4h53m
			glance-vmw-replicator-859f8fd458-dkmrp                            1/1     Running     0          4h53m
			helm-glance-glance1-bfcqq5gj6m-77cgr                              0/1     Completed   0          4h54m
			valid-glance-glance1-drstn2qx47-2wtzp 

		Search pods for "Error":
		root@vio-mgt [ ~ ]# kubectl -n openstack get pods | grep  "Error"
		
		Find the datastores used by Glance:
		kubectl -n openstack get glances.vio.vmware.com glance1 -o yaml | grep -i datastore	

		View the logs of glance-api and glance-vmw-replicator:
		kubectl logs -n openstack glance-api-79d574b8b8-5l6nh glance-api -f
		kubectl logs -n openstack  glance-vmw-replicator-859f8fd458-dkmrp  glance-vmw-replicator -f
ovfenv > ovfenv.txt
kubectl get pods -A -o wide > pods.txt
journalctl -u kubelet > kubelet.txt
journalctl -u docker > docker.txt
viocli generate supportbundle

Problem

OpenStack VIO - Network problem - Neutron - Init ImagePullBackOff
$ openstack image list
+--------------------------------------+------------+--------+
| ID                                   | Name       | Status |
+--------------------------------------+------------+--------+
| f423c7c6-17b4-4420-938b-cff43ab2a6bd | Debian-9.3 | active |
| f8029b36-ce50-4511-8087-efeaf0fdde7a | Debian-9.8 | active |
| 3e787c71-acda-4b7e-8df1-b988823204ac | MEM        | active |
| 0bd6f7de-6da0-4f4e-b81f-f42c0aae2c58 | Photon 3.0 | active |
| 2198532e-84b2-41d0-8d75-0d560f4ac122 | SWF63-V2   | active |
| 46584f47-9fa0-4963-b711-83fd429eec17 | SWF69-V6   | active |
| 1c03e029-d7be-466c-bfb2-fbcb03037af1 | SWF71-V1   | active |
| 140e4ea5-1d72-4884-a4c1-a477ac06c317 | SWF79-V3   | active |
| d4343c41-bfa9-49d8-a4e9-f19bb6d91cd0 | SWF83-V2   | active |
| 7f0008dc-69a9-46c9-aa88-f51a6ec08f67 | SWF89-V1   | active |
| 64977fc8-1f1b-4d31-b395-00978827c7c0 | SWF90-V4   | active |
| 7bf26ab2-09fc-495d-be1e-810cbc1c72e6 | Win-2-NoOk | active |
| 2bb95302-b32f-49a7-b677-3962cf5f648f | Win-NoOk   | active |
| dbdd23d1-1706-45fe-9c4b-8bda84017c98 | Win_1-NoOk | active |
+--------------------------------------+------------+--------+
$ openstack image list
Internal Server Error (HTTP 500)

root@vio-mgt [ ~ ]# viocli get deployment 
neutron        neutron-dhcp-agent-default        2/3       -
               neutron-metadata-agent-default    2/3       -
OpenStack Deployment State: DEGRADED

root@vio-mgt [ ~ ]# reboot

root@vio-mgt [ ~ ]# viocli get deployment 
neutron        neutron-dhcp-agent-default        2/3       -
               neutron-metadata-agent-default    2/3       -
vioshim        vioadmin1-vioshim                 0/1       -
OpenStack Deployment State: OUTAGE

root@vio-mgt [ ~ ]# kubectl get pods --all-namespaces |egrep -v "Completed|Running"
NAMESPACE            NAME                                                              READY   STATUS                  RESTARTS   AGE
kube-system          weave-net-9rwmg                                                   0/2     ErrImageNeverPull       0          94m
openstack            cinder-scheduler-d8578f6d-vjm5s                                   0/1     Evicted                 0          2d20h
openstack            cinder-volume-usage-audit-1585307400-xdn86                        0/1     Init:0/1                0          95m
openstack            compute-b78814fd-c34-compute-0                                    1/2     CrashLoopBackOff        21         2d19h
openstack            glance-api-7fd496db87-czf2g                                       0/2     Evicted                 0          2d20h
openstack            glance-api-7fd496db87-f5mkp                                       0/2     Evicted                 0          2d20h
openstack            heat-api-84f4fc7666-8btcb                                         0/2     Evicted                 0          2d20h
openstack            heat-engine-cleaner-1585307400-mkkqw                              0/1     Init:0/1                0          95m
openstack            horizon-55868f757f-4p44w                                          0/1     Evicted                 0          2d20h
openstack            ingress-78c67ccdcf-2f967                                          0/3     Evicted                 0          2d20h
openstack            keystone-api-ddf57bdc9-gpzsl                                      0/1     Evicted                 0          2d18h
openstack            keystone-fernet-rotate-1585310400-vd27t                           0/1     Init:0/1                0          45m
openstack            mariadb-server-1                                                  0/1     Init:0/2                0          106m
openstack            neutron-dhcp-agent-default-cm5pd                                  0/1     Init:ImagePullBackOff   0          96m
openstack            neutron-metadata-agent-default-pw2hl                              0/1     Init:ImagePullBackOff   0          95m
openstack            neutron-server-7744c854c9-lpl2d                                   0/2     Evicted                 0          2d20h
openstack            nova-api-osapi-784846d95c-64vv4                                   0/2     Evicted                 0          2d20h
openstack            nova-cell-setup-1585310400-94jjg                                  0/1     Init:0/1                0          45m
openstack            nova-scheduler-66d85c789c-tmgj5                                   0/1     Evicted                 0          2d20h
root@vio-mgt [ ~ ]# 

# kubectl logs -n openstack neutron-dhcp-agent-default-cm5pd neutron-dhcp-agent-default -f
Error from server (BadRequest): container "neutron-dhcp-agent-default" in pod "neutron-dhcp-agent-default-cm5pd" is waiting to start: PodInitializing

root@vio-mgt [ ~ ]# kubectl get pods -n openstack  |egrep "neutron-dhcp-agent-default|neutron-metadata-agent-default"
neutron-dhcp-agent-default-4rl9l                                  1/1     Running                 0          2d20h
neutron-dhcp-agent-default-7xhds                                  1/1     Running                 0          2d20h
neutron-dhcp-agent-default-cm5pd                                  0/1     Init:ImagePullBackOff   0          123m
neutron-metadata-agent-default-hxd2s                              1/1     Running                 0          2d20h
neutron-metadata-agent-default-pw2hl                              0/1     Init:ImagePullBackOff   0          121m
neutron-metadata-agent-default-vmls5                              1/1     Running                 0          2d20h

Resolution?

Deleting the failing weave-net pod (it is managed by a DaemonSet, so it is recreated automatically) may let the node recover, after which the neutron pods should be able to pull their images.
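
A minimal sketch of that fix, using the failing pod name from the listing above:

kubectl -n kube-system delete pod weave-net-9rwmg
kubectl -n kube-system get pods -o wide | grep weave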