[Pkg-libvirt-maintainers] Bug#1089616: Regression: starting a non-persistent VM with a TPM segfaults
hartmans at debian.org
Mon Dec 9 23:54:09 GMT 2024
Package: libvirt-daemon-driver-qemu
Version: 10.10.0-1
Severity: important
X-Debbugs-Cc: tim.miller at hadronindustries.com
With 10.8.0-1, my VM works fine.
I tend to use virsh create without virsh define.
I have my own automation (https://github.com/hadron/carthage ) on top of libvirt, and having VMs persisted within libvirt creates complexity I do not need.
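For context, that workflow means the VM is started via the transient-domain path: virsh create goes through virDomainCreateXML() (frames #3 and #4 in the backtrace below), and no persistent definition ever exists for the domain. A minimal sketch of that call, with the connection URI and domain XML as placeholders rather than my real configuration:

    /* Sketch only: a transient domain started without ever being defined.
     * The URI and XML here are placeholders; the real XML carries the
     * <tpm> stanza shown near the end of this report. */
    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom;
        const char *xml = "...";   /* placeholder for the full domain XML */

        if (!conn)
            return 1;

        /* No prior virDomainDefineXML(), so persistentDef is NULL when
         * qemuExtDevicesStart() later walks the TPM devices. */
        dom = virDomainCreateXML(conn, xml, 0);
        if (!dom) {
            fprintf(stderr, "virDomainCreateXML failed\n");
            virConnectClose(conn);
            return 1;
        }

        virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }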
Unfortunately, when I start a VM configuration that works with the old version under 10.10.0-1, libvirtd segfaults:
#0 0x00007fb94dceff3e in qemuExtDevicesStart (driver=driver at entry=0x7fb8f403e330, vm=vm at entry=0x7fb94001fab0, incomingMigration=false) at ../../src/qemu/qemu_extdevice.c:193
tpm = 0x7fb9400173a0
persistentTPMDef = <optimized out>
persistentDef = 0x0
def = 0x7fb940013b10
i = 0
qemu_extdevice.c:193:
    for (i = 0; i < def->ntpms; i++) {
        virDomainTPMDef *tpm = def->tpms[i];
        virDomainTPMDef *persistentTPMDef = persistentDef->tpms[i];
Obviously that last line is going to segfault when persistentDef is NULL, as it is here.
The code in 10.8.0-1 does not refer to persistentDef at all:
    for (i = 0; i < def->ntpms; i++) {
        virDomainTPMDef *tpm = def->tpms[i];
        if (tpm->type == VIR_DOMAIN_TPM_TYPE_EMULATOR &&
            qemuExtTPMStart(driver, vm, tpm, incomingMigration) < 0)
            return -1;
    }
I have not looked into the upstream change.
I'd strongly prefer that things continue to work with a created but not defined VM.
It's very useful in my situation.
If upstream is going to insist that VMs with a TPM be defined, better error handling than a segfault is required.
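For illustration only, and without having checked what upstream actually intends here or what the current qemuExtTPMStart() expects, a guard along these lines would at least avoid dereferencing a NULL persistentDef on the transient-domain path:

    for (i = 0; i < def->ntpms; i++) {
        virDomainTPMDef *tpm = def->tpms[i];
        /* Hypothetical guard: a domain started via virDomainCreateXML()
         * has no persistent definition, so fall back to the live TPM def
         * instead of dereferencing NULL. */
        virDomainTPMDef *persistentTPMDef =
            persistentDef ? persistentDef->tpms[i] : tpm;

Whether falling back to the live definition is the right semantics is upstream's call; the point is only that persistentDef can legitimately be NULL here.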
I've attached a full traceback, although I don't think it is useful beyond what I have shown.
To reproduce, I think all you need to do is include a stanza like the following in the <devices> section of the config:
    <tpm model='tpm-crb'>
      <backend type='emulator' version='2.0'/>
    </tpm>
----------------------------------------
#0 0x00007fb94dceff3e in qemuExtDevicesStart (driver=driver at entry=0x7fb8f403e330, vm=vm at entry=0x7fb94001fab0, incomingMigration=false) at ../../src/qemu/qemu_extdevice.c:193
tpm = 0x7fb9400173a0
persistentTPMDef = <optimized out>
persistentDef = 0x0
def = 0x7fb940013b10
i = 0
#1 0x00007fb94dd5250c in qemuProcessLaunch (conn=conn at entry=0x7fb940002b30, driver=driver at entry=0x7fb8f403e330, vm=vm at entry=0x7fb94001fab0, asyncJob=asyncJob at entry=VIR_ASYNC_JOB_START, incoming=incoming at entry=0x0, snapshot=snapshot at entry=0x0, vmop=VIR_NETDEV_VPORT_PROFILE_OP_CREATE, flags=17) at ../../src/qemu/qemu_process.c:7836
ret = -1
rv = <optimized out>
logfile = 149
logCtxt = 0x7fb94003fc70
priv = 0x7fb9400040d0
cmd = 0x0
hookData = {vm = 0x7fb94001fab0, driver = 0x7fb8f403e330, cfg = 0x7fb8f4055360}
cfg = 0x7fb8f4055360
nnicindexes = 0
nicindexes = 0x0
maxMemLock = 0
incomingMigrationExtDevices = false
__func__ = "qemuProcessLaunch"
__FUNCTION__ = "qemuProcessLaunch"
#2 0x00007fb94dd587d8 in qemuProcessStart (conn=conn at entry=0x7fb940002b30, driver=driver at entry=0x7fb8f403e330, vm=0x7fb94001fab0, updatedCPU=updatedCPU at entry=0x0, asyncJob=asyncJob at entry=VIR_ASYNC_JOB_START, migrateFrom=migrateFrom at entry=0x0, migrateFd=-1, migratePath=0x0, snapshot=0x0, vmop=VIR_NETDEV_VPORT_PROFILE_OP_CREATE, flags=17) at ../../src/qemu/qemu_process.c:8317
priv = 0x7fb9400040d0
incoming = <optimized out>
stopFlags = <optimized out>
relabel = false
relabelSavedState = false
ret = -1
rv = <optimized out>
__func__ = "qemuProcessStart"
__FUNCTION__ = "qemuProcessStart"
stop = <optimized out>
#3 0x00007fb94dce08c1 in qemuDomainCreateXML (conn=0x7fb940002b30, xml=<optimized out>, flags=0) at ../../src/qemu/qemu_driver.c:1610
driver = 0x7fb8f403e330
def = 0x0
vm = 0x7fb94001fab0
dom = 0x0
event = 0x0
event2 = 0x0
start_flags = 1
parse_flags = 130
__FUNCTION__ = "qemuDomainCreateXML"
#4 0x00007fb95d130c93 in virDomainCreateXML (conn=0x7fb940002b30, xmlDesc=0x7fb940002f10 "\n<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>\n <name>test-win2.test-pacc.internal</name>\n <uuid>6b541023-93ab-55ed-a5a2-1314cedab813</uuid>\n <metadata>\n <carthage:"..., flags=0) at ../../src/libvirt-domain.c:188
ret = <optimized out>
__func__ = "virDomainCreateXML"
__FUNCTION__ = "virDomainCreateXML"
#5 0x0000557323e2fe22 in remoteDispatchDomainCreateXML (server=0x557346412540, msg=0x55734642c5b0, client=0x557346432a10, rerr=0x7fb9571ff9f0, args=0x7fb940002a60, ret=0x7fb940004b40) at src/remote/remote_daemon_dispatch_stubs.h:5186
rv = -1
dom = 0x0
conn = <optimized out>
cleanup = <optimized out>
rv = <optimized out>
dom = <optimized out>
conn = <optimized out>
cleanup = <optimized out>
#6 remoteDispatchDomainCreateXMLHelper (server=0x557346412540, client=0x557346432a10, msg=0x55734642c5b0, rerr=0x7fb9571ff9f0, args=0x7fb940002a60, ret=0x7fb940004b40) at src/remote/remote_daemon_dispatch_stubs.h:5167
rv = <optimized out>
__func__ = "remoteDispatchDomainCreateXMLHelper"
#7 0x00007fb95d0149fa in virNetServerProgramDispatchCall (prog=0x5573464266d0, server=0x557346412540, client=0x557346432a10, msg=0x55734642c5b0) at ../../src/rpc/virnetserverprogram.c:423
ret = 0x7fb940004b40 ""
rv = -1
i = <optimized out>
identity = 0x7fb918000bb0
arg = 0x7fb940002a60 "\020/"
dispatcher = 0x557323e8a400 <remoteProcs+480>
rerr = {code = 0, domain = 0, message = 0x0, level = 0, dom = 0x0, str1 = 0x0, str2 = 0x0, str3 = 0x0, int1 = 0, int2 = 0, net = 0x0}
arg = <optimized out>
ret = <optimized out>
rv = <optimized out>
dispatcher = <optimized out>
rerr = <optimized out>
i = <optimized out>
identity = <optimized out>
__FUNCTION__ = "virNetServerProgramDispatchCall"
error = <optimized out>
__n = <optimized out>
__s = <optimized out>
__p = <optimized out>
__n = <optimized out>
__s = <optimized out>
__p = <optimized out>
_pp = <optimized out>
_ptr = <optimized out>
#8 virNetServerProgramDispatch (prog=0x5573464266d0, server=server at entry=0x557346412540, client=client at entry=0x557346432a10, msg=msg at entry=0x55734642c5b0) at ../../src/rpc/virnetserverprogram.c:299
ret = -1
rerr = {code = 0, domain = 0, message = 0x0, level = 0, dom = 0x0, str1 = 0x0, str2 = 0x0, str3 = 0x0, int1 = 0, int2 = 0, net = 0x0}
__func__ = "virNetServerProgramDispatch"
__FUNCTION__ = "virNetServerProgramDispatch"
#9 0x00007fb95d01a642 in virNetServerProcessMsg (srv=srv at entry=0x557346412540, client=0x557346432a10, prog=<optimized out>, msg=0x55734642c5b0) at ../../src/rpc/virnetserver.c:135
__func__ = "virNetServerProcessMsg"
#10 0x00007fb95d01a981 in virNetServerHandleJob (jobOpaque=0x5573463ee260, opaque=0x557346412540) at ../../src/rpc/virnetserver.c:155
srv = 0x557346412540
job = 0x5573463ee260
__func__ = "virNetServerHandleJob"
_pp = <optimized out>
_ptr = <optimized out>
#11 0x00007fb95cf4b3e5 in virThreadPoolWorker (opaque=<optimized out>) at ../../src/util/virthreadpool.c:164
data = 0x0
pool = 0x557346412620
cond = 0x557346412690
priority = false
curWorkers = 0x557346412708
maxLimit = 0x5573464126f0
job = <optimized out>
#12 0x00007fb95cf4a998 in virThreadHelper (data=<optimized out>) at ../../src/util/virthread.c:256
args = <optimized out>
local = {func = 0x7fb95cf4b2d0 <virThreadPoolWorker>, name = 0x5573463f23a0 "rpc-libvirtd", worker = true, opaque = 0x5573463f2460}
thname = 0x7fb940000b70 "rpc-libvirtd"
maxname = 15
#13 0x00007fb95c8faf52 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:447
ret = <optimized out>
pd = <optimized out>
out = <optimized out>
unwind_buf = {cancel_jmp_buf = {{jmp_buf = {140434007393984, 3476436494021025408, -136, 17, 140729017025504, 140433999003648, -3509317114524344704, -3509305155466043776}, mask_was_saved = 0}}, priv = {pad = {0x0, 0x0, 0x0, 0x0}, data = {prev = 0x0, cleanup = 0x0, canceltype = 0}}}
not_first_call = <optimized out>
#14 0x00007fb95c979678 in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
No locals.