Description
Please confirm
- I have searched existing issues to check if an issue already exists for the bug I encountered.
Distribution
ubuntu
Distribution version
24.04
Output of "snap list --all lxd core20 core22 core24 snapd"
root@node1:~# snap list --all lxd core20 core22 core24 snapd
Name Version Rev Tracking Publisher Notes
core24 20250504 988 latest/stable canonical✓ base,disabled
core24 20250526 1006 latest/stable canonical✓ base
lxd 6.4-2336111 34099 6/stable canonical✓ disabled,in-cohort
lxd 6.4-1c216d4 34285 6/stable canonical✓ in-cohort
snapd 2.68.5 24718 latest/stable canonical✓ snapd
Output of "lxc info" or system info if it fails
root@node1:~# lxc info
config:
cluster.https_address: 10.0.0.61:8443
core.bgp_address: 192.168.3.61
core.bgp_asn: "65061"
core.bgp_routerid: 192.168.3.61
core.https_address: 192.168.3.61:8443
network.ovn.ca_cert: |-
-----BEGIN CERTIFICATE-----
MIIB/DCCAYKgAwIBAgIRAPytFAKt+EKTUSbWCGEwIgIwCgYIKoZIzj0EAwMwPzER
MA8GA1UEChMITWljcm9PVk4xFDASBgNVBAsTC01pY3JvT1ZOIENBMRQwEgYDVQQD
EwtNaWNyb09WTiBDQTAeFw0yNTA2MjAxNDQ5NDJaFw0zNTA2MTgxNDQ5NDJaMD8x
ETAPBgNVBAoTCE1pY3JvT1ZOMRQwEgYDVQQLEwtNaWNyb09WTiBDQTEUMBIGA1UE
AxMLTWljcm9PVk4gQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASzCbsL9XLgeCFt
yX20m8qVyo/BxCS+ujXorY7BxN/X6HE9YPzYkVHEyiBV6fVCNDL7C606MSTGtghj
mKMLpfgy3nWvS9CoOVLAzVnJGWC+pX+cocdOFVCis8lwAUvEbqKjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSOTaO2Z6c75ddj
ACiVxnQVrIPbRjAKBggqhkjOPQQDAwNoADBlAjEAr04rkMRQO/jeKnKooNHx7DFf
DiYTdOzZPy8rMvrMbCMWfRE2cIobtMRk0qS1gPzvAjBOLlcJbMU9lfEOUPVppfNG
TixEl1vzwsmsBjWIaK+JbImEXH2oRgsyNvwAhmXTJaM=
-----END CERTIFICATE-----
network.ovn.client_cert: |-
-----BEGIN CERTIFICATE-----
MIIB8DCCAXWgAwIBAgIQfZrZFrfEuz8zNvMY7twRxTAKBggqhkjOPQQDAzA/MREw
DwYDVQQKEwhNaWNyb09WTjEUMBIGA1UECxMLTWljcm9PVk4gQ0ExFDASBgNVBAMT
C01pY3JvT1ZOIENBMB4XDTI1MDYyMDE0NDk0MloXDTI3MDYyMDE0NDk0MlowNDER
MA8GA1UEChMITWljcm9PVk4xDzANBgNVBAsTBmNsaWVudDEOMAwGA1UEAxMFbm9k
ZTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQwYhCf7PHiwkGdtkGocWnnhNlpDFrN
dh+T4gVyG0zKExGw6m3YhOB9TAaarOQjrNoFr2IBF05Jnlhs7rlS995cJ8ZndMuk
MML8ipDm8Zn51o3sAMlnBReWzVSOzJA6eMSjQTA/MA4GA1UdDwEB/wQEAwID6DAM
BgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFI5No7Znpzvl12MAKJXGdBWsg9tGMAoG
CCqGSM49BAMDA2kAMGYCMQDQ9eWPW1a/j6ph5cAh7dzwKE9QM3evIWUEwtx5oF3n
ouf0o3yfFgejJ5D9AWkIP7kCMQDFcrhywzfKg5Az2bBzcMtbX8+FH35Y+3tqV1nW
bIUSYFVu97ior6JY7CLraJMZhkg=
-----END CERTIFICATE-----
network.ovn.client_key: |-
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAskSF7Dz6Phsd8spY7VzSJ9UEyrULK+eDzXfP9BAYtYAfOiqHEUije
fKHhrP8ygjegBwYFK4EEACKhZANiAAQwYhCf7PHiwkGdtkGocWnnhNlpDFrNdh+T
4gVyG0zKExGw6m3YhOB9TAaarOQjrNoFr2IBF05Jnlhs7rlS995cJ8ZndMukMML8
ipDm8Zn51o3sAMlnBReWzVSOzJA6eMQ=
-----END EC PRIVATE KEY-----
network.ovn.northbound_connection: ssl:192.168.3.61:6641,ssl:192.168.3.62:6641,ssl:192.168.3.63:6641
api_extensions:
- storage_zfs_remove_snapshots
- container_host_shutdown_timeout
- container_stop_priority
- container_syscall_filtering
- auth_pki
- container_last_used_at
- etag
- patch
- usb_devices
- https_allowed_credentials
- image_compression_algorithm
- directory_manipulation
- container_cpu_time
- storage_zfs_use_refquota
- storage_lvm_mount_options
- network
- profile_usedby
- container_push
- container_exec_recording
- certificate_update
- container_exec_signal_handling
- gpu_devices
- container_image_properties
- migration_progress
- id_map
- network_firewall_filtering
- network_routes
- storage
- file_delete
- file_append
- network_dhcp_expiry
- storage_lvm_vg_rename
- storage_lvm_thinpool_rename
- network_vlan
- image_create_aliases
- container_stateless_copy
- container_only_migration
- storage_zfs_clone_copy
- unix_device_rename
- storage_lvm_use_thinpool
- storage_rsync_bwlimit
- network_vxlan_interface
- storage_btrfs_mount_options
- entity_description
- image_force_refresh
- storage_lvm_lv_resizing
- id_map_base
- file_symlinks
- container_push_target
- network_vlan_physical
- storage_images_delete
- container_edit_metadata
- container_snapshot_stateful_migration
- storage_driver_ceph
- storage_ceph_user_name
- resource_limits
- storage_volatile_initial_source
- storage_ceph_force_osd_reuse
- storage_block_filesystem_btrfs
- resources
- kernel_limits
- storage_api_volume_rename
- network_sriov
- console
- restrict_devlxd
- migration_pre_copy
- infiniband
- maas_network
- devlxd_events
- proxy
- network_dhcp_gateway
- file_get_symlink
- network_leases
- unix_device_hotplug
- storage_api_local_volume_handling
- operation_description
- clustering
- event_lifecycle
- storage_api_remote_volume_handling
- nvidia_runtime
- container_mount_propagation
- container_backup
- devlxd_images
- container_local_cross_pool_handling
- proxy_unix
- proxy_udp
- clustering_join
- proxy_tcp_udp_multi_port_handling
- network_state
- proxy_unix_dac_properties
- container_protection_delete
- unix_priv_drop
- pprof_http
- proxy_haproxy_protocol
- network_hwaddr
- proxy_nat
- network_nat_order
- container_full
- backup_compression
- nvidia_runtime_config
- storage_api_volume_snapshots
- storage_unmapped
- projects
- network_vxlan_ttl
- container_incremental_copy
- usb_optional_vendorid
- snapshot_scheduling
- snapshot_schedule_aliases
- container_copy_project
- clustering_server_address
- clustering_image_replication
- container_protection_shift
- snapshot_expiry
- container_backup_override_pool
- snapshot_expiry_creation
- network_leases_location
- resources_cpu_socket
- resources_gpu
- resources_numa
- kernel_features
- id_map_current
- event_location
- storage_api_remote_volume_snapshots
- network_nat_address
- container_nic_routes
- cluster_internal_copy
- seccomp_notify
- lxc_features
- container_nic_ipvlan
- network_vlan_sriov
- storage_cephfs
- container_nic_ipfilter
- resources_v2
- container_exec_user_group_cwd
- container_syscall_intercept
- container_disk_shift
- storage_shifted
- resources_infiniband
- daemon_storage
- instances
- image_types
- resources_disk_sata
- clustering_roles
- images_expiry
- resources_network_firmware
- backup_compression_algorithm
- ceph_data_pool_name
- container_syscall_intercept_mount
- compression_squashfs
- container_raw_mount
- container_nic_routed
- container_syscall_intercept_mount_fuse
- container_disk_ceph
- virtual-machines
- image_profiles
- clustering_architecture
- resources_disk_id
- storage_lvm_stripes
- vm_boot_priority
- unix_hotplug_devices
- api_filtering
- instance_nic_network
- clustering_sizing
- firewall_driver
- projects_limits
- container_syscall_intercept_hugetlbfs
- limits_hugepages
- container_nic_routed_gateway
- projects_restrictions
- custom_volume_snapshot_expiry
- volume_snapshot_scheduling
- trust_ca_certificates
- snapshot_disk_usage
- clustering_edit_roles
- container_nic_routed_host_address
- container_nic_ipvlan_gateway
- resources_usb_pci
- resources_cpu_threads_numa
- resources_cpu_core_die
- api_os
- container_nic_routed_host_table
- container_nic_ipvlan_host_table
- container_nic_ipvlan_mode
- resources_system
- images_push_relay
- network_dns_search
- container_nic_routed_limits
- instance_nic_bridged_vlan
- network_state_bond_bridge
- usedby_consistency
- custom_block_volumes
- clustering_failure_domains
- resources_gpu_mdev
- console_vga_type
- projects_limits_disk
- network_type_macvlan
- network_type_sriov
- container_syscall_intercept_bpf_devices
- network_type_ovn
- projects_networks
- projects_networks_restricted_uplinks
- custom_volume_backup
- backup_override_name
- storage_rsync_compression
- network_type_physical
- network_ovn_external_subnets
- network_ovn_nat
- network_ovn_external_routes_remove
- tpm_device_type
- storage_zfs_clone_copy_rebase
- gpu_mdev
- resources_pci_iommu
- resources_network_usb
- resources_disk_address
- network_physical_ovn_ingress_mode
- network_ovn_dhcp
- network_physical_routes_anycast
- projects_limits_instances
- network_state_vlan
- instance_nic_bridged_port_isolation
- instance_bulk_state_change
- network_gvrp
- instance_pool_move
- gpu_sriov
- pci_device_type
- storage_volume_state
- network_acl
- migration_stateful
- disk_state_quota
- storage_ceph_features
- projects_compression
- projects_images_remote_cache_expiry
- certificate_project
- network_ovn_acl
- projects_images_auto_update
- projects_restricted_cluster_target
- images_default_architecture
- network_ovn_acl_defaults
- gpu_mig
- project_usage
- network_bridge_acl
- warnings
- projects_restricted_backups_and_snapshots
- clustering_join_token
- clustering_description
- server_trusted_proxy
- clustering_update_cert
- storage_api_project
- server_instance_driver_operational
- server_supported_storage_drivers
- event_lifecycle_requestor_address
- resources_gpu_usb
- clustering_evacuation
- network_ovn_nat_address
- network_bgp
- network_forward
- custom_volume_refresh
- network_counters_errors_dropped
- metrics
- image_source_project
- clustering_config
- network_peer
- linux_sysctl
- network_dns
- ovn_nic_acceleration
- certificate_self_renewal
- instance_project_move
- storage_volume_project_move
- cloud_init
- network_dns_nat
- database_leader
- instance_all_projects
- clustering_groups
- ceph_rbd_du
- instance_get_full
- qemu_metrics
- gpu_mig_uuid
- event_project
- clustering_evacuation_live
- instance_allow_inconsistent_copy
- network_state_ovn
- storage_volume_api_filtering
- image_restrictions
- storage_zfs_export
- network_dns_records
- storage_zfs_reserve_space
- network_acl_log
- storage_zfs_blocksize
- metrics_cpu_seconds
- instance_snapshot_never
- certificate_token
- instance_nic_routed_neighbor_probe
- event_hub
- agent_nic_config
- projects_restricted_intercept
- metrics_authentication
- images_target_project
- cluster_migration_inconsistent_copy
- cluster_ovn_chassis
- container_syscall_intercept_sched_setscheduler
- storage_lvm_thinpool_metadata_size
- storage_volume_state_total
- instance_file_head
- instances_nic_host_name
- image_copy_profile
- container_syscall_intercept_sysinfo
- clustering_evacuation_mode
- resources_pci_vpd
- qemu_raw_conf
- storage_cephfs_fscache
- network_load_balancer
- vsock_api
- instance_ready_state
- network_bgp_holdtime
- storage_volumes_all_projects
- metrics_memory_oom_total
- storage_buckets
- storage_buckets_create_credentials
- metrics_cpu_effective_total
- projects_networks_restricted_access
- storage_buckets_local
- loki
- acme
- internal_metrics
- cluster_join_token_expiry
- remote_token_expiry
- init_preseed
- storage_volumes_created_at
- cpu_hotplug
- projects_networks_zones
- network_txqueuelen
- cluster_member_state
- instances_placement_scriptlet
- storage_pool_source_wipe
- zfs_block_mode
- instance_generation_id
- disk_io_cache
- amd_sev
- storage_pool_loop_resize
- migration_vm_live
- ovn_nic_nesting
- oidc
- network_ovn_l3only
- ovn_nic_acceleration_vdpa
- cluster_healing
- instances_state_total
- auth_user
- security_csm
- instances_rebuild
- numa_cpu_placement
- custom_volume_iso
- network_allocations
- storage_api_remote_volume_snapshot_copy
- zfs_delegate
- operations_get_query_all_projects
- metadata_configuration
- syslog_socket
- event_lifecycle_name_and_project
- instances_nic_limits_priority
- disk_initial_volume_configuration
- operation_wait
- cluster_internal_custom_volume_copy
- disk_io_bus
- storage_cephfs_create_missing
- instance_move_config
- ovn_ssl_config
- init_preseed_storage_volumes
- metrics_instances_count
- server_instance_type_info
- resources_disk_mounted
- server_version_lts
- oidc_groups_claim
- loki_config_instance
- storage_volatile_uuid
- import_instance_devices
- instances_uefi_vars
- instances_migration_stateful
- container_syscall_filtering_allow_deny_syntax
- access_management
- vm_disk_io_limits
- storage_volumes_all
- instances_files_modify_permissions
- image_restriction_nesting
- container_syscall_intercept_finit_module
- device_usb_serial
- network_allocate_external_ips
- explicit_trust_token
- shared_custom_block_volumes
- instance_import_conversion
- instance_create_start
- instance_protection_start
- devlxd_images_vm
- disk_io_bus_virtio_blk
- metrics_api_requests
- projects_limits_disk_pool
- ubuntu_pro_guest_attach
- metadata_configuration_entity_types
- access_management_tls
- network_allocations_ovn_uplink
- network_ovn_uplink_vlan
- state_logical_cpus
- vm_limits_cpu_pin_strategy
- gpu_cdi
- images_all_projects
- metadata_configuration_scope
- unix_device_hotplug_ownership_inherit
- unix_device_hotplug_subsystem_device_option
- storage_ceph_osd_pool_size
- network_get_target
- network_zones_all_projects
- vm_root_volume_attachment
- projects_limits_uplink_ips
- entities_with_entitlements
- profiles_all_projects
- storage_driver_powerflex
- storage_driver_pure
- cloud_init_ssh_keys
- oidc_scopes
- project_default_network_and_storage
- client_cert_presence
- clustering_groups_used_by
- container_bpf_delegation
- override_snapshot_profiles_on_copy
- resources_device_fs_uuid
- backup_metadata_version
- storage_buckets_all_projects
- network_acls_all_projects
- networks_all_projects
- clustering_restore_skip_mode
- disk_io_threads_virtiofsd
api_status: stable
api_version: "1.0"
auth: trusted
public: false
auth_methods:
- tls
client_certificate: false
auth_user_name: root
auth_user_method: unix
environment:
addresses:
- 192.168.3.61:8443
architectures:
- x86_64
- i686
backup_metadata_version_range:
- 1
- 2
certificate: |
-----BEGIN CERTIFICATE-----
MIIB3zCCAWagAwIBAgIQIhGyndEsNn/YiotSZbHqPzAKBggqhkjOPQQDAzAjMQww
CgYDVQQKEwNMWEQxEzARBgNVBAMMCnJvb3RAbm9kZTEwHhcNMjUwNjIwMDk0ODUz
WhcNMzUwNjE4MDk0ODUzWjAjMQwwCgYDVQQKEwNMWEQxEzARBgNVBAMMCnJvb3RA
bm9kZTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARnNlVv/FEBcuTmYRbie2svVE/M
mxkP+diMLSvvOs6OC4/MuWD7YO/bZeb3DDvpir/7w705jZvAdSCATsi0VcFIezWk
9uCl/JQQeK2kuN2EfnhwelfwuCZ9VcjZLv7qprijXzBdMA4GA1UdDwEB/wQEAwIF
oDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMCgGA1UdEQQhMB+C
BW5vZGUxhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMDA2cAMGQC
MB94wYiMzh5bHLWwLbPczH/PBXWIa2yxsdW1QBb4892I7ym8Nuol1WE8Y/qlzCDX
eAIwWDZ3smIFDNk+ZPB2N+53g4pfvN9se7hFhPHlwOUc7uvW6JMJelcHDPSIyBLy
u8WF
-----END CERTIFICATE-----
certificate_fingerprint: 007b044de35d01b69a09be3821dbbc7441e16cda1cb00187947f6c1e50fe6b95
driver: lxc | qemu
driver_version: 6.0.4 | 8.2.2
instance_types:
- container
- virtual-machine
firewall: nftables
kernel: Linux
kernel_architecture: x86_64
kernel_features:
bpf_token: "false"
idmapped_mounts: "true"
netnsid_getifaddrs: "true"
seccomp_listener: "true"
seccomp_listener_continue: "true"
uevent_injection: "true"
unpriv_binfmt: "true"
unpriv_fscaps: "true"
kernel_version: 6.8.0-62-generic
lxc_features:
cgroup2: "true"
core_scheduling: "true"
devpts_fd: "true"
idmapped_mounts_v2: "true"
mount_injection_file: "true"
network_gateway_device_route: "true"
network_ipvlan: "true"
network_l2proxy: "true"
network_phys_macvlan_mtu: "true"
network_veth_router: "true"
pidfd: "true"
seccomp_allow_deny_syntax: "true"
seccomp_notify: "true"
seccomp_proxy_send_notify_fd: "true"
os_name: Ubuntu
os_version: "24.04"
project: default
server: lxd
server_clustered: true
server_event_mode: full-mesh
server_name: node1
server_pid: 2201
server_version: "6.4"
server_lts: false
storage: zfs
storage_version: 2.2.2-0ubuntu9.2
storage_supported_drivers:
- name: cephfs
version: 19.2.0
remote: true
- name: cephobject
version: 19.2.0
remote: true
- name: dir
version: "1"
remote: false
- name: lvm
version: 2.03.16(2) (2022-05-18) / 1.02.185 (2022-05-18) / 4.48.0
remote: false
- name: powerflex
version: 2.8 (nvme-cli)
remote: true
- name: pure
version: 2.1.9 (iscsiadm) / 2.8 (nvme-cli)
remote: true
- name: btrfs
version: 6.6.3
remote: false
- name: ceph
version: 19.2.0
remote: true
- name: zfs
version: 2.2.2-0ubuntu9.2
remote: false
Issue description
I have a three-node cluster running 6.4.
I have c1 running on node1, c2 on node2, and c3 on node3.
In the web UI I pressed evacuate on node3 and immediately pressed restore.
Then the following happened:
c3 was stopped and moved to node1. I'm able to start and stop it on node1. c3 should have moved back to node3 when I pressed restore on node3, but it didn't. c3 is currently stopped, but I'm unable to migrate it back to node3 because the state is supposedly running. I'm able to move it freely between node1 and node2, but not back to node3.
Actions on the CLI:
root@node1:~# lxc ls
+------+---------+-------------------+------+-----------+-----------+----------+
| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS | LOCATION |
+------+---------+-------------------+------+-----------+-----------+----------+
| c1 | STOPPED | | | CONTAINER | 0 | node3 |
+------+---------+-------------------+------+-----------+-----------+----------+
| c2 | STOPPED | | | CONTAINER | 0 | node1 |
+------+---------+-------------------+------+-----------+-----------+----------+
| c3 | RUNNING | 172.16.3.2 (eth0) | | CONTAINER | 0 | node3 |
+------+---------+-------------------+------+-----------+-----------+----------+
root@node1:~# lxc show c1
Error: unknown command "show" for "lxc"
Did you mean this?
stop
root@node1:~# lxc config show c1
architecture: x86_64
config:
image.architecture: amd64
image.description: ubuntu 24.04 LTS amd64 (release) (20250610)
image.label: release
image.os: ubuntu
image.release: noble
image.serial: "20250610"
image.type: squashfs
image.version: "24.04"
volatile.base_image: 9c73fb6ca4c2ae7dd357696a2e16ff8ac2f140090deab77b95a24add2386a55a
volatile.cloud-init.instance-id: f6498c3d-58eb-4b6e-bfab-cc7d5368a0c3
volatile.eth-1.hwaddr: 00:16:3e:3e:a2:2e
volatile.eth-1.name: eth0
volatile.evacuate.origin: node1
volatile.idmap.base: "0"
volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
volatile.last_state.idmap: '[]'
volatile.last_state.power: STOPPED
volatile.last_state.ready: "false"
volatile.uuid: 1ef36f44-e34e-438a-84a9-4ea32dd81b43
volatile.uuid.generation: 1ef36f44-e34e-438a-84a9-4ea32dd81b43
devices:
eth-1:
network: ovn1
type: nic
ephemeral: false
profiles:
- default
stateful: false
description: ""
root@node1:~# lxc move c1 --target node1
Error: Failed requesting instance create on destination: Failed getting exclusive access to instance: Instance is running
root@node1:~# lxc move c1 --target node2
root@node1:~# lxc move c1 --target node1
Error: Failed requesting instance create on destination: Failed getting exclusive access to instance: Instance is running
root@node1:~# lxc move c1 --target node3
root@node1:~# lxc move c1 --target node2
root@node1:~# lxc move c1 --target node1
Error: Failed requesting instance create on destination: Failed getting exclusive access to instance: Instance is running
root@node1:~#
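For reference, a few ways to cross-check what the cluster has recorded for the instance state (a sketch using standard lxc commands; the instance name c1 and the default project are taken from this report):

lxc list c1 --format csv --columns nsL        # name, state and location as listed by lxc
lxc query /1.0/instances/c1/state             # state reported by the member currently hosting c1
lxc config get c1 volatile.last_state.power   # last power state recorded in the instance config

In the output above, lxc ls shows c1 as STOPPED and volatile.last_state.power is also STOPPED, yet the move to node1 is refused with "Instance is running".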
Steps to reproduce
I was able to reproduce this with c2 on node2.
- I evacuated node2
- Restored node2 while the evacuation was still running.
- c2 moved to node3 but never returned to node2.
- c2 is now in the same state as c1: it can move freely to node3 and node1 but cannot move back to node2.
I was also able to reproduce this on the CLI (albeit with two terminals open), roughly as sketched below.
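A rough sketch of the two-terminal CLI reproduction (node2 and c2 are taken from this report; the timing is the important part, the restore has to start while the evacuation operation is still in progress):

# terminal 1 (answer the confirmation prompt)
lxc cluster evacuate node2

# terminal 2, started immediately, while the evacuation is still running
lxc cluster restore node2

# afterwards c2 stays on another member and refuses to move back,
# presumably with the same error as shown for c1 above:
lxc move c2 --target node2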
Information to attach
- Any relevant kernel output (dmesg)
- Instance log (lxc info NAME --show-log)
- Instance configuration (lxc config show NAME --expanded)
- Main daemon log (at /var/log/lxd/lxd.log or /var/snap/lxd/common/lxd/logs/lxd.log)
- Output of the client with --debug
- Output of the daemon with --debug (or use lxc monitor while reproducing the issue)