[Unit]
Description=libvirt QEMU daemon
Documentation=man:virtqemud(8)
Documentation=https://libvirt.org/
BindsTo=virtqemud.socket
Wants=virtqemud-ro.socket
Wants=virtqemud-admin.socket
After=virtqemud.socket
After=virtqemud-ro.socket
After=virtqemud-admin.socket
Conflicts=libvirtd.service
After=libvirtd.service
After=network.target
After=dbus.service
After=apparmor.service
Requires=virtlogd.socket
Wants=virtlockd.socket
After=virtlogd.socket
After=virtlockd.socket
Wants=systemd-machined.service
After=systemd-machined.service
After=remote-fs.target

[Service]
Type=notify
Environment=VIRTQEMUD_ARGS="--timeout 120"
EnvironmentFile=-/etc/sysconfig/virtqemud
ExecStart=/usr/bin/virtqemud $VIRTQEMUD_ARGS
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
KillMode=process
# Raise hard limits to match the behaviour of systemd >= 240.
# During startup, the daemon will set the soft limit to match the hard
# limit, per systemd recommendations.
LimitNOFILE=1024:524288
# The cgroups pids controller can limit the number of tasks started by
# the daemon, which can limit the number of domains for some hypervisors.
# A conservative default of 8 tasks per guest results in a TasksMax of
# 32k (8 * 4096 = 32768) to support 4096 guests.
TasksMax=32768
# With cgroups v2 there is no devices controller anymore, so we have to
# use eBPF to control access to devices. To do that we create an eBPF
# hash map, which locks memory. The default map size for 64 devices,
# together with the program, takes 12k per guest; 12k * 4096 guests is
# 48M, rounded up to 64M to support 4096 guests.
LimitMEMLOCK=64M

[Install]
WantedBy=multi-user.target
Also=virtqemud.socket
Also=virtqemud-ro.socket
Also=virtqemud-admin.socket
Also=virtlogd.socket
Also=virtlockd.socket
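
# A quick way to confirm the resource limits systemd actually applied at
# runtime (an illustrative check, assuming the unit is installed and
# enabled as virtqemud.service):
#   systemctl show virtqemud.service -p LimitNOFILE -p TasksMax -p LimitMEMLOCK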