OpenStack: completely removing a nova compute node
1. Migrate all instances on the node to other nodes (see the command sketch after the database output below).
2. Stop the nova services on the node.
3. Connect to the database and switch to the nova schema: use nova;
MariaDB [nova]> select * from nova.services;
+---------------------+---------------------+------------+-----+----------------------+--------------------+-------------+--------------+----------+---------+-----------------+---------------------+-------------+---------+--------------------------------------+
| created_at | updated_at | deleted_at | id | host | binary | topic | report_count | disabled | deleted | disabled_reason | last_seen_up | forced_down | version | uuid |
+---------------------+---------------------+------------+-----+----------------------+--------------------+-------------+--------------+----------+---------+-----------------+---------------------+-------------+---------+--------------------------------------+
| 2019-03-11 10:30:01 | NULL | NULL | 4 | 100.100.32.201 | nova-osapi_compute | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 3becc105-44cf-4885-8b2d-8048d00ded47 |
| 2019-03-11 10:30:01 | NULL | NULL | 10 | 100.100.32.201 | nova-metadata | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 387e3f1f-0823-4224-831b-df08cb2c1746 |
| 2019-03-11 10:30:13 | 2019-07-31 02:30:14 | NULL | 19 | wuhan32-ceph01.v3.os | nova-scheduler | scheduler | 1202548 | 0 | 0 | NULL | 2019-07-31 02:30:14 | 0 | 35 | d4c9496c-34f7-4f2f-9834-467cfafb01ac |
| 2019-03-11 10:30:14 | NULL | NULL | 43 | 100.100.32.203 | nova-osapi_compute | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 6009d41a-0b05-4137-8761-9fd12157e49c |
| 2019-03-11 10:30:14 | NULL | NULL | 46 | 100.100.32.202 | nova-osapi_compute | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 3b04e9a1-a603-4b32-bff7-d6702fafdf27 |
| 2019-03-11 10:30:14 | NULL | NULL | 55 | 100.100.32.203 | nova-metadata | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 9a75e21e-856e-43f1-a876-fa1f301122f9 |
| 2019-03-11 10:30:15 | NULL | NULL | 61 | 100.100.32.202 | nova-metadata | NULL | 0 | 0 | 0 | NULL | NULL | 0 | 35 | 44068439-287a-454b-acfc-4330a63ed868 |
| 2019-03-11 10:30:18 | 2019-07-31 02:30:09 | NULL | 73 | wuhan32-ceph03.v3.os | nova-scheduler | scheduler | 1202466 | 0 | 0 | NULL | 2019-07-31 02:30:09 | 0 | 35 | 1d6144dd-0b3b-4f45-99d2-5804938d5184 |
| 2019-03-11 10:30:19 | 2019-07-31 02:30:14 | NULL | 103 | wuhan32-ceph02.v3.os | nova-scheduler | scheduler | 1197330 | 0 | 0 | NULL | 2019-07-31 02:30:13 | 0 | 35 | 3f9a4d0c-b268-4351-bcc7-361eec6a4606 |
| 2019-03-11 10:30:21 | 2019-07-31 02:30:11 | NULL | 133 | wuhan32-ceph01.v3.os | nova-conductor | conductor | 1202912 | 0 | 0 | NULL | 2019-07-31 02:30:10 | 0 | 35 | 9c862d9e-451e-4a4d-9251-4e40f8a5c9a1 |
| 2019-03-11 10:30:24 | 2019-07-31 02:30:11 | NULL | 136 | wuhan32-ceph02.v3.os | nova-conductor | conductor | 1198093 | 0 | 0 | NULL | 2019-07-31 02:30:11 | 0 | 35 | c5cd2311-fb06-423e-9b94-f8e45815b8a1 |
| 2019-03-11 10:30:25 | 2019-07-31 02:30:14 | NULL | 145 | wuhan32-ceph03.v3.os | nova-conductor | conductor | 1202915 | 0 | 0 | NULL | 2019-07-31 02:30:14 | 0 | 35 | 3fd6eec5-e90c-4dd8-9f7c-36adb25b7b8c |
| 2019-03-11 10:30:27 | 2019-07-31 02:30:09 | NULL | 151 | wuhan32-ceph01.v3.os | nova-consoleauth | consoleauth | 1202847 | 0 | 0 | NULL | 2019-07-31 02:30:09 | 0 | 35 | 0c50d5f8-5d09-4ec6-b01c-4783ffb31b5c |
| 2019-03-11 10:30:31 | 2019-07-31 02:30:15 | NULL | 154 | wuhan32-ceph02.v3.os | nova-consoleauth | consoleauth | 1197846 | 0 | 0 | NULL | 2019-07-31 02:30:15 | 0 | 35 | 9ea57ee0-a3cf-4c93-b4a6-b4781d9219ca |
| 2019-03-11 10:30:31 | 2019-07-31 02:30:12 | NULL | 157 | wuhan32-ceph03.v3.os | nova-consoleauth | consoleauth | 1202629 | 0 | 0 | NULL | 2019-07-31 02:30:12 | 0 | 35 | c8e39978-db2f-4450-bbe1-6e55fe9ba2ac |
| 2019-03-11 10:31:17 | 2019-07-27 10:14:22 | NULL | 160 | wuhan32-ceph01.v3.os | nova-compute | compute | 1185461 | 1 | 0 | NULL | 2019-07-27 10:14:22 | 0 | 35 | 42b0abcf-5269-452d-8440-d94d3e65be5f |
| 2019-03-11 10:31:21 | 2019-07-31 02:30:15 | NULL | 163 | wuhan32-ceph02.v3.os | nova-compute | compute | 1199198 | 1 | 0 | NULL | 2019-07-31 02:30:15 | 0 | 35 | 6de82966-51fe-445a-a44c-cc3640f24135 |
| 2019-03-11 10:31:22 | 2019-07-31 02:30:10 | NULL | 166 | wuhan32-ceph03.v3.os | nova-compute | compute | 1203925 | 1 | 0 | NULL | 2019-07-31 02:30:10 | 0 | 35 | f03ff74a-0f25-48be-a0ee-bee119256a26 |
| 2019-03-20 04:42:44 | 2019-07-31 02:30:09 | NULL | 169 | wuhan32-nova01.v3.os | nova-compute | compute | 1135132 | 1 | 0 | NULL | 2019-07-31 02:30:09 | 0 | 35 | e69d60b3-6a14-423f-a1ea-c10efd372fb1 |
| 2019-03-20 04:42:44 | 2019-07-31 02:30:08 | NULL | 172 | wuhan32-nova02.v3.os | nova-compute | compute | 1135141 | 1 | 0 | NULL | 2019-07-31 02:30:08 | 0 | 35 | 1ed15525-1510-4ece-8159-57c500810a4c |
| 2019-03-20 09:15:32 | 2019-07-31 02:30:10 | NULL | 178 | wuhan32-nova03.v3.os | nova-compute | compute | 1133477 | 1 | 0 | NULL | 2019-07-31 02:30:10 | 0 | 35 | 3331d4e6-7a0d-4840-992f-676f82372870 |
| 2019-03-20 09:30:06 | 2019-07-31 02:30:14 | NULL | 184 | wuhan32-nova04.v3.os | nova-compute | compute | 1133343 | 0 | 0 | NULL | 2019-07-31 02:30:14 | 0 | 35 | 2afab391-6421-4765-a281-75d6e217decf |
| 2019-03-20 09:58:53 | 2019-07-31 02:30:09 | NULL | 190 | wuhan32-nova05.v3.os | nova-compute | compute | 1133243 | 1 | 0 | NULL | 2019-07-31 02:30:09 | 0 | 35 | 3ee7cc40-ba2b-4796-9b32-c993ce0bce2d |
| 2019-03-20 09:58:52 | 2019-07-31 02:30:09 | NULL | 193 | wuhan32-nova06.v3.os | nova-compute | compute | 1133251 | 1 | 0 | NULL | 2019-07-31 02:30:09 | 0 | 35 | 30c17f15-a759-4fd3-a9f3-96764224bd1c |
| 2019-06-03 06:40:29 | 2019-07-29 06:15:00 | NULL | 196 | wuhan32-nova07.v3.os | nova-compute | compute | 467760 | 1 | 0 | NULL | 2019-07-27 10:07:39 | 0 | 35 | c9671f52-8eb6-49b6-b2ae-b1e89cf545e3 |
| 2019-06-03 07:36:10 | 2019-07-26 04:14:16 | NULL | 200 | wuhan32-nova08.v3.os | nova-compute | compute | 455860 | 1 | 0 | NULL | 2019-07-26 01:56:38 | 0 | 35 | 4fbe496f-d2f0-4658-b814-808c1cef0b5b |
| 2019-06-03 08:13:41 | 2019-07-31 02:30:05 | NULL | 202 | wuhan32-nova09.v3.os | nova-compute | compute | 485954 | 1 | 0 | NULL | 2019-07-31 02:30:05 | 0 | 35 | cbc9232f-4b85-4d4e-8a1a-8899ff0aed2c |
| 2019-06-03 08:13:44 | 2019-07-31 02:30:12 | NULL | 205 | wuhan32-nova10.v3.os | nova-compute | compute | 485952 | 1 | 0 | NULL | 2019-07-31 02:30:12 | 0 | 35 | b228da41-7dba-42ab-adb5-340237f5a2d7 |
| 2019-06-10 07:17:15 | 2019-07-31 02:30:08 | NULL | 208 | wuhan32-nova11.v3.os | nova-compute | compute | 424052 | 1 | 0 | NULL | 2019-07-31 02:30:08 | 0 | 35 | 5ed6fed6-1dca-4a7a-b652-fc56c5b970b8 |
+---------------------+---------------------+------------+-----+----------------------+--------------------+-------------+--------------+----------+---------+-----------------+---------------------+-------------+---------+--------------------------------------+
MariaDB [nova]> delete from compute_nodes where hypervisor_hostname="wuhan32-nova11.v3.os";
Query OK, 1 row affected (0.01 sec)
MariaDB [nova]> delete from nova.services where host="wuhan32-nova11.v3.os";
Query OK, 1 row affected (0.00 sec)
Deleting these two rows is all that is needed; with that, wuhan32-nova11.v3.os is kicked out of the cluster for good.
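For reference, steps 1 and 2 above can be carried out with commands along the following lines. This is only a sketch: the hostname wuhan32-nova11.v3.os is taken from the listing above, and exact flags vary with the client version in use.
On a controller, disable the compute service so the scheduler stops placing new instances on the node, then live-migrate everything off it:
# openstack compute service set --disable --disable-reason "decommission" wuhan32-nova11.v3.os nova-compute
# nova host-evacuate-live wuhan32-nova11.v3.os
On the node itself, stop the services:
# systemctl stop openstack-nova-compute.service libvirtd.service
Newer releases can also drop the service record without touching MariaDB directly, via openstack compute service delete <id> (the id comes from openstack compute service list); the raw SQL deletes above achieve the same result on this deployment.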
That deletion was still not thorough enough: today a new node was added that reuses a previously used hostname, and launching instances on it failed. The record below has to be deleted as well.
[root@wuhan32-ceph01 ~]# openstack resource provider list
+--------------------------------------+----------------------+------------+
| uuid | name | generation |
+--------------------------------------+----------------------+------------+
| 66d73349-df23-4f70-9b22-3a0de27e6e88 | wuhan32-ceph01.v3.os | 158 |
| d81ab5ff-c01b-4818-93c6-44dbe0ae9096 | wuhan32-ceph02.v3.os | 211 |
| 25103b21-4ebf-4dbf-a451-206435f2c88f | wuhan32-ceph03.v3.os | 147 |
| 404cbe8e-9319-40af-b4fe-ad8996cdcacd | wuhan32-nova01.v3.os | 129 |
| 312bb1b5-2172-4fbd-bd8e-0c3f739ffc93 | wuhan32-nova02.v3.os | 146 |
| b5b44b6b-78d2-402a-a77a-89c310e66c1e | wuhan32-nova03.v3.os | 132 |
| abe55f7b-2070-487e-a578-8b834754724e | wuhan32-nova04.v3.os | 128 |
| 32c8d62f-b527-4bce-9082-36d15c3fa51a | wuhan32-nova05.v3.os | 121 |
| e8e94453-c2eb-4678-b924-7363472161d9 | wuhan32-nova06.v3.os | 127 |
| c6e6c728-8d55-4252-9b98-2c4087f3072f | wuhan32-nova07.v3.os | 1361 |
| b8d405ed-fdc7-4899-8c37-149d614ca710 | wuhan32-nova08.v3.os | 631 |
| d0470e0a-9a1a-4b5e-833b-78c526437d9e | wuhan32-nova09.v3.os | 154 |
| d5d6523f-1090-4ef6-a43d-9e4e2ddf389c | wuhan32-nova10.v3.os | 193 |
| 2f9c131a-d88c-4d96-b4f8-2494051e28bb | wuhan32-nova11.v3.os | 20 |
+--------------------------------------+----------------------+------------+
Delete the stale provider:
[root@wuhan32-ceph01 ~]# openstack resource provider delete 2f9c131a-d88c-4d96-b4f8-2494051e28bb
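When the provider list is long, the stale record can also be looked up by hostname instead of scanning the whole table. A sketch, assuming the installed osc-placement version supports the --name filter:
[root@wuhan32-ceph01 ~]# openstack resource provider list --name wuhan32-nova11.v3.os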
Deleting another one raised an error:
[root@wuhan32-ceph01 ~]# openstack resource provider delete c6e6c728-8d55-4252-9b98-2c4087f3072f
Unable to delete resource provider c6e6c728-8d55-4252-9b98-2c4087f3072f: Resource provider has allocations. (HTTP 409)
The error says the provider still has allocations against it, so look up what those allocations are:
[root@wuhan32-ceph01 ~]# openstack resource provider show c6e6c728-8d55-4252-9b98-2c4087f3072f --allocations
+-------------+--------------------------------------------------------------------------------------------------+
| Field | Value |
+-------------+--------------------------------------------------------------------------------------------------+
| uuid | c6e6c728-8d55-4252-9b98-2c4087f3072f |
| name | wuhan32-nova07.v3.os |
| generation | 1362 |
| allocations | {u'1ae819ff-46d9-4fdf-b048-f9cde1700e31': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 4096}}, u'dd15fcc5-4e21-4892-bb91-203b87122bbf': {u'resources': {u'VCPU': 2, u'MEMORY_MB': 4096}}, u'0e078645-f821-48e1-984c-9a8194c9cb3a': {u'resources': {u'VCPU': 2, u'MEMORY_MB': 4096}}, u'9c662c54-b320-4900-a528-2a6a489098b0': {u'resources': {u'VCPU': 4, u'MEMORY_MB': 8192}}, u'fcaa29f3-8146-4563-af9b-dda91a750a4f': {u'resources': {u'VCPU': 2, u'MEMORY_MB': 4096}}} |
+-------------+--------------------------------------------------------------------------------------------------+
[root@wuhan32-ceph01 ~]#
The provider is still associated with several consumer UUIDs. Take 1ae819ff-46d9-4fdf-b048-f9cde1700e31 as an example: it is an instance ID. My guess is that these leftover allocations were caused by a bug during an earlier migration.
[root@wuhan32-ceph01 ~]# nova show 1ae819ff-46d9-4fdf-b048-f9cde1700e31
+--------------------------------------+----------------------------------------------------------------------------------+
| Property | Value |
+--------------------------------------+----------------------------------------------------------------------------------+
| OS-DCF:diskConfig | AUTO |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | wuhan32-nova01.v3.os |
| OS-EXT-SRV-ATTR:hostname | zhangwei |
| OS-EXT-SRV-ATTR:hypervisor_hostname | wuhan32-nova01.v3.os |
| OS-EXT-SRV-ATTR:instance_name | instance-00000229 |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | 0 |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-02v9hnvw |
| OS-EXT-SRV-ATTR:root_device_name | /dev/vda |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | 1 |
| OS-EXT-STS:task_state | - |
| OS-EXT-STS:vm_state | active |
| OS-SRV-USG:launched_at | 2019-07-30T01:56:54.000000 |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| config_drive | |
| created | 2019-05-16T07:06:37Z |
| description | zhangwei |
| flavor:disk | 20 |
| flavor:ephemeral | 0 |
| flavor:extra_specs | {} |
| flavor:original_name | c1-m4G-d20G |
| flavor:ram | 4096 |
| flavor:swap | 0 |
| flavor:vcpus | 1 |
| hostId | 7203e247b590b284207f3027fe90bc39399b0a7665e8d9098542b849 |
| host_status | MAINTENANCE |
| id | 1ae819ff-46d9-4fdf-b048-f9cde1700e31 |
| image | Attempt to boot from volume - no image supplied |
| key_name | - |
| locked | False |
| metadata | {} |
| name | zhangwei |
| net-lan44 network | 192.168.44.90 |
| os-extended-volumes:volumes_attached | [{"id": "0def7579-083f-46a1-a2d7-108d0549bc68", "delete_on_termination": false}] |
| progress | 0 |
| security_groups | default |
| status | ACTIVE |
| tags | [] |
| tenant_id | 74b2419c5fe24ad288e934c926612c4b |
| trusted_image_certificates | - |
| updated | 2019-08-26T07:06:29Z |
| user_id | d1ee444f9127441ba6b3656c076a1eb2 |
+--------------------------------------+----------------------------------------------------------------------------------+
[root@wuhan32-ceph01 ~]#
After migrating these instances to other nodes:
[root@wuhan32-ceph01 ~]# openstack resource provider show c6e6c728-8d55-4252-9b98-2c4087f3072f --allocations
+-------------+--------------------------------------+
| Field | Value |
+-------------+--------------------------------------+
| uuid | c6e6c728-8d55-4252-9b98-2c4087f3072f |
| name | wuhan32-nova07.v3.os |
| generation | 1367 |
| allocations | {} |
+-------------+--------------------------------------+
[root@wuhan32-ceph01 ~]# openstack resource provider delete c6e6c728-8d55-4252-9b98-2c4087f3072f
[root@wuhan32-ceph01 ~]# openstack resource provider list
+--------------------------------------+----------------------+------------+
| uuid | name | generation |
+--------------------------------------+----------------------+------------+
| 66d73349-df23-4f70-9b22-3a0de27e6e88 | wuhan32-ceph01.v3.os | 158 |
| d81ab5ff-c01b-4818-93c6-44dbe0ae9096 | wuhan32-ceph02.v3.os | 211 |
| 25103b21-4ebf-4dbf-a451-206435f2c88f | wuhan32-ceph03.v3.os | 147 |
| 404cbe8e-9319-40af-b4fe-ad8996cdcacd | wuhan32-nova01.v3.os | 135 |
| 312bb1b5-2172-4fbd-bd8e-0c3f739ffc93 | wuhan32-nova02.v3.os | 151 |
| b5b44b6b-78d2-402a-a77a-89c310e66c1e | wuhan32-nova03.v3.os | 132 |
| abe55f7b-2070-487e-a578-8b834754724e | wuhan32-nova04.v3.os | 128 |
| 32c8d62f-b527-4bce-9082-36d15c3fa51a | wuhan32-nova05.v3.os | 121 |
| e8e94453-c2eb-4678-b924-7363472161d9 | wuhan32-nova06.v3.os | 127 |
| d0470e0a-9a1a-4b5e-833b-78c526437d9e | wuhan32-nova09.v3.os | 154 |
| d5d6523f-1090-4ef6-a43d-9e4e2ddf389c | wuhan32-nova10.v3.os | 193 |
| e8a91696-225e-48e9-8cb9-ec36e941b18c | wuhan32-nova08.v3.os | 15 |
+--------------------------------------+----------------------+------------+
[root@wuhan32-ceph01 ~]#
Everything is back to normal.
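In this case migrating the instances cleared the allocations. If a consumer UUID turns out to belong to an instance that no longer exists, or that cannot be moved, the orphaned allocation can be inspected and removed directly in placement. A sketch, assuming a recent enough osc-placement that provides the allocation sub-commands, using the consumer UUID from above as the example:
[root@wuhan32-ceph01 ~]# openstack resource provider allocation show 1ae819ff-46d9-4fdf-b048-f9cde1700e31
[root@wuhan32-ceph01 ~]# openstack resource provider allocation delete 1ae819ff-46d9-4fdf-b048-f9cde1700e31
Only delete an allocation this way after confirming the consumer is not a live instance, otherwise the scheduler loses track of resources that are actually in use.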
------------------------------------------
If the openstack resource provider commands are not available, refer to the following:
# yum -y install epel-release
# yum -y install python-pip
# pip install osc-placement
# . admin-openrc
# openstack resource provider list
+--------------------------------------+-----------------------+------------+
| uuid | name | generation |
+--------------------------------------+-----------------------+------------+
| 1824de38-b376-4d05-8c42-3c5a65adb6cc | anchovy.lab.local | 11 |
+--------------------------------------+-----------------------+------------+
# openstack resource provider delete 1824de38-b376-4d05-8c42-3c5a65adb6cc
# ssh 192.168.253.40
# systemctl restart libvirtd.service openstack-nova-compute.service
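Once the service record and resource provider are gone, the removal can be double-checked with a few read-only commands. A sketch, run from any host with admin credentials loaded:
# openstack compute service list --service nova-compute
# openstack hypervisor list
# openstack resource provider list
The decommissioned hostname should no longer appear in any of the three listings.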