v7.2.4 release

-----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEe3O61ovnosKJMUsicBtPaxppPlkFAmSp1ksPHG1qdEB0bHMu
 bXNrLnJ1AAoJEHAbT2saaT5ZaU4IAKVA9cUkF8IORzcZ8iXy6kTjLNYXd458nURO
 PkrZ0DZfnUJTmyUDoR5gjZrZhRvvHGSyAnwBvd1WLeFZgD2yD2i2ZZczfI3uc3ov
 LkW6mMJRVEWFlszA6SCbFtZ+Z9hgbJidQmb+SxxxnCmrnQF48ysQ0Feg/B4TObMt
 Ej/xMEF52Ujr4VDe3Iq6dXp/AT8NwShEEc1VWFXbNJCNp2BM31FC21cFENPiv2y3
 2E8n+wOGxSSayArOEkgov55Mre9M7L79hOhRXgp0EmJP/nxmm9GTv5rETrT5USr7
 rZzDrsfS3muSArsGd7J4NkvyrParNmIBjSrOK0zX5p8pg9pVJ7U=
 =o+if
 -----END PGP SIGNATURE-----

Merge tag 'v7.2.4' into sync/qemu-7.2.0

v7.2.4 release
Matt Borgerson 2023-07-17 03:29:42 -07:00
commit 0f27526006
165 changed files with 1839 additions and 846 deletions

View file

@ -109,8 +109,8 @@ crash-test-debian:
IMAGE: debian-amd64
script:
- cd build
- make check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
- make NINJA=":" check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
build-system-fedora:
extends: .native_build_job_template
@ -155,7 +155,7 @@ crash-test-fedora:
IMAGE: fedora
script:
- cd build
- make check-venv
- make NINJA=":" check-venv
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
- tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32

View file

@ -1 +1 @@
7.2.0
7.2.4

View file

@ -1826,7 +1826,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
} else /* if (prot & PAGE_READ) */ {
tlb_addr = tlbe->addr_read;
if (!tlb_hit(tlb_addr, addr)) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
if (!VICTIM_TLB_HIT(addr_read, addr)) {
tlb_fill(env_cpu(env), addr, size,
MMU_DATA_LOAD, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);

View file

@ -37,8 +37,15 @@
// #define DEBUG_VERBOSE
/* CURL 7.85.0 switches to a string based API for specifying
* the desired protocols.
*/
#if LIBCURL_VERSION_NUM >= 0x075500
#define PROTOCOLS "HTTP,HTTPS,FTP,FTPS"
#else
#define PROTOCOLS (CURLPROTO_HTTP | CURLPROTO_HTTPS | \
CURLPROTO_FTP | CURLPROTO_FTPS)
#endif
#define CURL_NUM_STATES 8
#define CURL_NUM_ACB 8
@ -509,9 +516,18 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
* obscure protocols. For example, do not allow POP3/SMTP/IMAP see
* CVE-2013-0249.
*
* Restricting protocols is only supported from 7.19.4 upwards.
* Restricting protocols is only supported from 7.19.4 upwards. Note:
* version 7.85.0 deprecates CURLOPT_*PROTOCOLS in favour of a string
* based CURLOPT_*PROTOCOLS_STR API.
*/
#if LIBCURL_VERSION_NUM >= 0x071304
#if LIBCURL_VERSION_NUM >= 0x075500
if (curl_easy_setopt(state->curl,
CURLOPT_PROTOCOLS_STR, PROTOCOLS) ||
curl_easy_setopt(state->curl,
CURLOPT_REDIR_PROTOCOLS_STR, PROTOCOLS)) {
goto err;
}
#elif LIBCURL_VERSION_NUM >= 0x071304
if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) ||
curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) {
goto err;
@ -669,7 +685,12 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
const char *file;
const char *cookie;
const char *cookie_secret;
double d;
/* CURL >= 7.55.0 uses curl_off_t for content length instead of a double */
#if LIBCURL_VERSION_NUM >= 0x073700
curl_off_t cl;
#else
double cl;
#endif
const char *secretid;
const char *protocol_delimiter;
int ret;
@ -796,27 +817,36 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
}
if (curl_easy_perform(state->curl))
goto out;
if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) {
/* CURL 7.55.0 deprecates CURLINFO_CONTENT_LENGTH_DOWNLOAD in favour of
* the *_T version which returns a more sensible type for content length.
*/
#if LIBCURL_VERSION_NUM >= 0x073700
if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &cl)) {
goto out;
}
#else
if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &cl)) {
goto out;
}
#endif
/* Prior CURL 7.19.4 return value of 0 could mean that the file size is not
* know or the size is zero. From 7.19.4 CURL returns -1 if size is not
* known and zero if it is really zero-length file. */
#if LIBCURL_VERSION_NUM >= 0x071304
if (d < 0) {
if (cl < 0) {
pstrcpy(state->errmsg, CURL_ERROR_SIZE,
"Server didn't report file size.");
goto out;
}
#else
if (d <= 0) {
if (cl <= 0) {
pstrcpy(state->errmsg, CURL_ERROR_SIZE,
"Unknown file size or zero-length file.");
goto out;
}
#endif
s->len = d;
s->len = cl;
if ((!strncasecmp(s->url, "http://", strlen("http://"))
|| !strncasecmp(s->url, "https://", strlen("https://")))
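
For context on the version gates in this hunk: libcurl packs LIBCURL_VERSION_NUM as 0xXXYYZZ, one byte per component, so 0x075500 is 7.85.0 (the CURLOPT_*PROTOCOLS_STR switchover), 0x073700 is 7.55.0 (the *_T content-length API), and 0x071304 is 7.19.4. A minimal sketch of the encoding:

    #include <stdio.h>

    /* Sketch: libcurl encodes its version as 0xXXYYZZ (major, minor, patch). */
    static unsigned curl_version_num(unsigned major, unsigned minor, unsigned patch)
    {
        return (major << 16) | (minor << 8) | patch;
    }

    int main(void)
    {
        printf("7.85.0 -> 0x%06x\n", curl_version_num(7, 85, 0)); /* 0x075500 */
        printf("7.55.0 -> 0x%06x\n", curl_version_num(7, 55, 0)); /* 0x073700 */
        printf("7.19.4 -> 0x%06x\n", curl_version_num(7, 19, 4)); /* 0x071304 */
        return 0;
    }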

View file

@ -2087,6 +2087,9 @@ static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
flags |= BDRV_REQ_MAY_UNMAP;
}
/* Can't use optimization hint with bufferless zero write */
flags &= ~BDRV_REQ_REGISTERED_BUF;
}
if (ret < 0) {

View file

@ -268,6 +268,7 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
timer_mod(&iTask->retry_timer,
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
iTask->do_retry = 1;
return;
} else if (status == SCSI_STATUS_CHECK_CONDITION) {
int error = iscsi_translate_sense(&task->sense);
if (error == EAGAIN) {

View file

@ -213,15 +213,17 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
error_report("Device '%s' not found", device);
return;
}
if (!blk_is_available(blk)) {
error_report("Device '%s' has no medium", device);
return;
}
bs = bdrv_skip_implicit_filters(blk_bs(blk));
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!blk_is_available(blk)) {
error_report("Device '%s' has no medium", device);
aio_context_release(aio_context);
return;
}
ret = bdrv_commit(bs);
aio_context_release(aio_context);

View file

@ -115,7 +115,7 @@ static int update_header_sync(BlockDriverState *bs)
return bdrv_flush(bs->file->bs);
}
static inline void bitmap_table_to_be(uint64_t *bitmap_table, size_t size)
static inline void bitmap_table_bswap_be(uint64_t *bitmap_table, size_t size)
{
size_t i;
@ -1401,9 +1401,10 @@ static int store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp)
goto fail;
}
bitmap_table_to_be(tb, tb_size);
bitmap_table_bswap_be(tb, tb_size);
ret = bdrv_pwrite(bs->file, tb_offset, tb_size * sizeof(tb[0]), tb, 0);
if (ret < 0) {
bitmap_table_bswap_be(tb, tb_size);
error_setg_errno(errp, -ret, "Failed to write bitmap '%s' to file",
bm_name);
goto fail;

View file

@ -980,7 +980,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sector_write = merged_sector;
} else if (i == sectors - 1 && trailing_length) {
/* partial sector at the end of the buffer */
ret = bdrv_pread(bs->file, file_offset,
ret = bdrv_pread(bs->file, file_offset + trailing_length,
VHDX_LOG_SECTOR_SIZE - trailing_length,
merged_sector + trailing_length, 0);
if (ret < 0) {

View file

@ -152,12 +152,22 @@ void blockdev_mark_auto_del(BlockBackend *blk)
JOB_LOCK_GUARD();
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
if (block_job_has_bdrv(job, blk_bs(blk))) {
do {
job = block_job_next_locked(NULL);
while (job && (job->job.cancelled ||
job->job.deferred_to_main_loop ||
!block_job_has_bdrv(job, blk_bs(blk))))
{
job = block_job_next_locked(job);
}
if (job) {
/*
* This drops the job lock temporarily and polls, so we need to
* restart processing the list from the start after this.
*/
job_cancel_locked(&job->job, false);
}
}
} while (job);
dinfo->auto_del = 1;
}
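
The do/while above restarts the scan from the head of the job list on every pass because, as the added comment notes, job_cancel_locked() can drop the job lock and poll, leaving any iterator held across the call stale. A runnable sketch of the restart-from-head pattern, with a hypothetical job list standing in for QEMU's:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical job list used to illustrate the restart-from-head scan. */
    typedef struct Job { int id; int want_cancel; struct Job *next; } Job;

    static Job *head;

    static void cancel(Job *j)
    {
        /* In the real code this drops the job lock and polls, which can
         * mutate the list; here we just mark the job handled. */
        printf("cancelled job %d\n", j->id);
        j->want_cancel = 0;
    }

    int main(void)
    {
        Job c = { 3, 1, NULL }, b = { 2, 0, &c }, a = { 1, 1, &b };
        head = &a;

        Job *job;
        do {
            job = head;                       /* restart from the head ... */
            while (job && !job->want_cancel) {
                job = job->next;              /* ... and rescan the list */
            }
            if (job) {
                cancel(job);                  /* may invalidate iterators */
            }
        } while (job);
        return 0;
    }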

View file

@ -1065,6 +1065,7 @@ static void char_socket_finalize(Object *obj)
qio_net_listener_set_client_func_full(s->listener, NULL, NULL,
NULL, chr->gcontext);
object_unref(OBJECT(s->listener));
s->listener = NULL;
}
if (s->tls_creds) {
object_unref(OBJECT(s->tls_creds));

configure (vendored)
View file

@ -2430,7 +2430,7 @@ echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak
echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
echo "GLIB_VERSION=$(pkg-config --modversion glib-2.0)" >> $config_host_mak
echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
echo "EXESUF=$EXESUF" >> $config_host_mak

View file

@ -233,8 +233,8 @@ Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead.
System emulator machines
------------------------
Arm ``virt`` machine ``dtb-kaslr-seed`` property
''''''''''''''''''''''''''''''''''''''''''''''''
Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
The ``dtb-kaslr-seed`` property on the ``virt`` board has been
deprecated; use the new name ``dtb-randomness`` instead. The new name

View file

@ -2,7 +2,7 @@ Multi-process QEMU
==================
This document describes how to configure and use multi-process qemu.
For the design document refer to docs/devel/qemu-multiprocess.
For the design document refer to docs/devel/multi-process.rst.
1) Configuration
----------------

View file

@ -5135,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status)
float64_unpack_canonical(&rp, float64_one, status);
for (i = 0 ; i < 15 ; i++) {
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
rp = *parts_muladd(&tp, &xp, &rp, 0, status);
rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
xnp = *parts_mul(&xnp, &xp, status);
}
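
The one-word change above is the whole bug: the loop evaluates a truncated series for 2^x in which xnp tracks the running power of x, but each multiply-add step was fed xp (the first power) instead. A scalar sketch of the intended accumulation, using exp(x) series coefficients as a stand-in:

    #include <stdio.h>

    /* Scalar sketch of the series evaluation the fix restores: accumulate
     * r = 1 + c[0]*x + c[1]*x^2 + ..., with xn holding the running power
     * of x. The bug multiplied by the first power of x every time. */
    static double poly_accumulate(const double *c, int n, double x)
    {
        double r = 1.0, xn = x;
        for (int i = 0; i < n; i++) {
            r = c[i] * xn + r;   /* a fused multiply-add in the real code */
            xn *= x;             /* advance to the next power of x */
        }
        return r;
    }

    int main(void)
    {
        /* exp(x) coefficients 1/1!, 1/2!, ... as a stand-in series */
        const double c[] = { 1.0, 1.0 / 2, 1.0 / 6, 1.0 / 24, 1.0 / 120 };
        printf("%f\n", poly_accumulate(c, 5, 0.5)); /* ~1.6487 = exp(0.5) */
        return 0;
    }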

View file

@ -26,6 +26,7 @@
#include "qemu/xattr.h"
#include "9p-iov-marshal.h"
#include "hw/9pfs/9p-proxy.h"
#include "hw/9pfs/9p-util.h"
#include "fsdev/9p-iov-marshal.h"
#define PROGNAME "virtfs-proxy-helper"
@ -338,6 +339,28 @@ static void resetugid(int suid, int sgid)
}
}
/*
* Open regular file or directory. Attempts to open any special file are
* rejected.
*
* returns file descriptor or -1 on error
*/
static int open_regular(const char *pathname, int flags, mode_t mode)
{
int fd;
fd = open(pathname, flags, mode);
if (fd < 0) {
return fd;
}
if (close_if_special_file(fd) < 0) {
return -1;
}
return fd;
}
/*
* send response in two parts
* 1) ProxyHeader
@ -682,7 +705,7 @@ static int do_create(struct iovec *iovec)
if (ret < 0) {
goto unmarshal_err_out;
}
ret = open(path.data, flags, mode);
ret = open_regular(path.data, flags, mode);
if (ret < 0) {
ret = -errno;
}
@ -707,7 +730,7 @@ static int do_open(struct iovec *iovec)
if (ret < 0) {
goto err_out;
}
ret = open(path.data, flags);
ret = open_regular(path.data, flags, 0);
if (ret < 0) {
ret = -errno;
}

View file

@ -13,6 +13,8 @@
#ifndef QEMU_9P_UTIL_H
#define QEMU_9P_UTIL_H
#include "qemu/error-report.h"
#ifdef O_PATH
#define O_PATH_9P_UTIL O_PATH
#else
@ -112,6 +114,38 @@ static inline void close_preserve_errno(int fd)
errno = serrno;
}
/**
* close_if_special_file() - Close @fd if neither regular file nor directory.
*
* @fd: file descriptor of open file
* Return: 0 on regular file or directory, -1 otherwise
*
* CVE-2023-2861: Prohibit opening any special file directly on host
* (especially device files), as a compromised client could potentially gain
* access outside exported tree under certain, unsafe setups. We expect
* client to handle I/O on special files exclusively on guest side.
*/
static inline int close_if_special_file(int fd)
{
struct stat stbuf;
if (fstat(fd, &stbuf) < 0) {
close_preserve_errno(fd);
return -1;
}
if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) {
error_report_once(
"9p: broken or compromised client detected; attempt to open "
"special file (i.e. neither regular file, nor directory)"
);
close(fd);
errno = ENXIO;
return -1;
}
return 0;
}
static inline int openat_dir(int dirfd, const char *name)
{
return openat(dirfd, name,
@ -146,6 +180,10 @@ again:
return -1;
}
if (close_if_special_file(fd) < 0) {
return -1;
}
serrno = errno;
/* O_NONBLOCK was only needed to open the file. Let's drop it. We don't
* do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat()
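
The CVE-2023-2861 comment above states the policy; mechanically it is just fstat() plus the S_ISREG/S_ISDIR mode macros. A standalone sketch of the same classification, probing /dev/null (a character device) as the example:

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/null", O_RDONLY);   /* a character device */
        struct stat st;

        if (fd >= 0 && fstat(fd, &st) == 0) {
            /* Exactly the test close_if_special_file() applies */
            printf("regular=%d dir=%d special=%d\n",
                   S_ISREG(st.st_mode), S_ISDIR(st.st_mode),
                   !S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode));
            close(fd);
        }
        return 0;
    }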

View file

@ -48,3 +48,9 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s"
v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }"
v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u"
# xen-9p-backend.c
xen_9pfs_alloc(char *name) "name %s"
xen_9pfs_connect(char *name) "name %s"
xen_9pfs_disconnect(char *name) "name %s"
xen_9pfs_free(char *name) "name %s"

View file

@ -24,6 +24,8 @@
#include "qemu/option.h"
#include "fsdev/qemu-fsdev.h"
#include "trace.h"
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 9
@ -335,6 +337,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
int i;
trace_xen_9pfs_disconnect(xendev->name);
for (i = 0; i < xen_9pdev->num_rings; i++) {
if (xen_9pdev->rings[i].evtchndev != NULL) {
qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
@ -343,39 +347,40 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
xen_9pdev->rings[i].local_port);
xen_9pdev->rings[i].evtchndev = NULL;
}
}
}
static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
int i;
if (xen_9pdev->rings[0].evtchndev != NULL) {
xen_9pfs_disconnect(xendev);
}
for (i = 0; i < xen_9pdev->num_rings; i++) {
if (xen_9pdev->rings[i].data != NULL) {
xen_be_unmap_grant_refs(&xen_9pdev->xendev,
xen_9pdev->rings[i].data,
(1 << xen_9pdev->rings[i].ring_order));
xen_9pdev->rings[i].data = NULL;
}
if (xen_9pdev->rings[i].intf != NULL) {
xen_be_unmap_grant_refs(&xen_9pdev->xendev,
xen_9pdev->rings[i].intf,
1);
xen_9pdev->rings[i].intf = NULL;
}
if (xen_9pdev->rings[i].bh != NULL) {
qemu_bh_delete(xen_9pdev->rings[i].bh);
xen_9pdev->rings[i].bh = NULL;
}
}
g_free(xen_9pdev->id);
xen_9pdev->id = NULL;
g_free(xen_9pdev->tag);
xen_9pdev->tag = NULL;
g_free(xen_9pdev->path);
xen_9pdev->path = NULL;
g_free(xen_9pdev->security_model);
xen_9pdev->security_model = NULL;
g_free(xen_9pdev->rings);
xen_9pdev->rings = NULL;
}
static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
trace_xen_9pfs_free(xendev->name);
return 0;
}
@ -387,6 +392,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
V9fsState *s = &xen_9pdev->state;
QemuOpts *fsdev;
trace_xen_9pfs_connect(xendev->name);
if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
&xen_9pdev->num_rings) == -1 ||
xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
@ -494,6 +501,8 @@ out:
static void xen_9pfs_alloc(struct XenLegacyDevice *xendev)
{
trace_xen_9pfs_alloc(xendev->name);
xenstore_write_be_str(xendev, "versions", VERSIONS);
xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);

View file

@ -52,6 +52,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 1,
.max_access_size = 4,
},
.impl = {
.max_access_size = 1,
},
};

View file

@ -429,6 +429,16 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
* acpi_pcihp_eject_slot() when the operation is completed.
*/
pdev->qdev.pending_deleted_event = true;
/* if unplug was requested before OSPM is initialized,
* linux kernel will clear GPE0.sts[] bits during boot, which effectively
* hides unplug event. And than followup qmp_device_del() calls remain
* blocked by above flag permanently.
* Unblock qmp_device_del() by setting expire limit, so user can
* repeat unplug request later when OSPM has been booted.
*/
pdev->qdev.pending_deleted_expires_ms =
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* 1 msec */
s->acpi_pcihp_pci_status[bsel].down |= (1U << slot);
acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS);
}

View file

@ -188,33 +188,35 @@ struct AspeedMachineState {
static void aspeed_write_smpboot(ARMCPU *cpu,
const struct arm_boot_info *info)
{
static const uint32_t poll_mailbox_ready[] = {
AddressSpace *as = arm_boot_address_space(cpu, info);
static const ARMInsnFixup poll_mailbox_ready[] = {
/*
* r2 = per-cpu go sign value
* r1 = AST_SMP_MBOX_FIELD_ENTRY
* r0 = AST_SMP_MBOX_FIELD_GOSIGN
*/
0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */
0xe21000ff, /* ands r0, r0, #255 */
0xe59f201c, /* ldr r2, [pc, #28] */
0xe1822000, /* orr r2, r2, r0 */
{ 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5 */
{ 0xe21000ff }, /* ands r0, r0, #255 */
{ 0xe59f201c }, /* ldr r2, [pc, #28] */
{ 0xe1822000 }, /* orr r2, r2, r0 */
0xe59f1018, /* ldr r1, [pc, #24] */
0xe59f0018, /* ldr r0, [pc, #24] */
{ 0xe59f1018 }, /* ldr r1, [pc, #24] */
{ 0xe59f0018 }, /* ldr r0, [pc, #24] */
0xe320f002, /* wfe */
0xe5904000, /* ldr r4, [r0] */
0xe1520004, /* cmp r2, r4 */
0x1afffffb, /* bne <wfe> */
0xe591f000, /* ldr pc, [r1] */
AST_SMP_MBOX_GOSIGN,
AST_SMP_MBOX_FIELD_ENTRY,
AST_SMP_MBOX_FIELD_GOSIGN,
{ 0xe320f002 }, /* wfe */
{ 0xe5904000 }, /* ldr r4, [r0] */
{ 0xe1520004 }, /* cmp r2, r4 */
{ 0x1afffffb }, /* bne <wfe> */
{ 0xe591f000 }, /* ldr pc, [r1] */
{ AST_SMP_MBOX_GOSIGN },
{ AST_SMP_MBOX_FIELD_ENTRY },
{ AST_SMP_MBOX_FIELD_GOSIGN },
{ 0, FIXUP_TERMINATOR }
};
static const uint32_t fixupcontext[FIXUP_MAX] = { 0 };
rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready,
sizeof(poll_mailbox_ready),
info->smp_loader_start);
arm_write_bootloader("aspeed.smpboot", as, info->smp_loader_start,
poll_mailbox_ready, fixupcontext);
}
static void aspeed_reset_secondary(ARMCPU *cpu,

View file

@ -59,26 +59,6 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu,
return cpu_get_address_space(cs, asidx);
}
typedef enum {
FIXUP_NONE = 0, /* do nothing */
FIXUP_TERMINATOR, /* end of insns */
FIXUP_BOARDID, /* overwrite with board ID number */
FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */
FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */
FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */
FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */
FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */
FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */
FIXUP_BOOTREG, /* overwrite with boot register address */
FIXUP_DSB, /* overwrite with correct DSB insn for cpu */
FIXUP_MAX,
} FixupType;
typedef struct ARMInsnFixup {
uint32_t insn;
FixupType fixup;
} ARMInsnFixup;
static const ARMInsnFixup bootloader_aarch64[] = {
{ 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */
{ 0xaa1f03e1 }, /* mov x1, xzr */
@ -149,9 +129,10 @@ static const ARMInsnFixup smpboot[] = {
{ 0, FIXUP_TERMINATOR }
};
static void write_bootloader(const char *name, hwaddr addr,
const ARMInsnFixup *insns, uint32_t *fixupcontext,
AddressSpace *as)
void arm_write_bootloader(const char *name,
AddressSpace *as, hwaddr addr,
const ARMInsnFixup *insns,
const uint32_t *fixupcontext)
{
/* Fix up the specified bootloader fragment and write it into
* guest memory using rom_add_blob_fixed(). fixupcontext is
@ -213,8 +194,8 @@ static void default_write_secondary(ARMCPU *cpu,
fixupcontext[FIXUP_DSB] = CP15_DSB_INSN;
}
write_bootloader("smpboot", info->smp_loader_start,
smpboot, fixupcontext, as);
arm_write_bootloader("smpboot", as, info->smp_loader_start,
smpboot, fixupcontext);
}
void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
@ -686,7 +667,10 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds,
rom_ptr_for_as(as, addr, size));
g_free(fdt);
if (fdt != ms->fdt) {
g_free(ms->fdt);
ms->fdt = fdt;
}
return size;
@ -1171,8 +1155,8 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
fixupcontext[FIXUP_ENTRYPOINT_LO] = entry;
fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32;
write_bootloader("bootloader", info->loader_start,
primary_loader, fixupcontext, as);
arm_write_bootloader("bootloader", as, info->loader_start,
primary_loader, fixupcontext);
if (info->write_board_setup) {
info->write_board_setup(cpu, info);

View file

@ -16,6 +16,7 @@
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "hw/arm/boot.h"
#include "hw/arm/bcm2836.h"
#include "hw/registerfields.h"
#include "qemu/error-report.h"
@ -124,20 +125,22 @@ static const char *board_type(uint32_t board_rev)
static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info)
{
static const uint32_t smpboot[] = {
0xe1a0e00f, /* mov lr, pc */
0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */
0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */
0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */
0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */
0xe320f001, /* 1: yield */
0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/
0xe3530000, /* cmp r3, #0 ;spin while zero */
0x0afffffb, /* beq 1b */
0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */
0xe12fff13, /* bx r3 ;jump to target */
0x400000cc, /* (constant: mailbox 3 read/clear base) */
static const ARMInsnFixup smpboot[] = {
{ 0xe1a0e00f }, /* mov lr, pc */
{ 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4) }, /* mov pc, BOARDSETUP_ADDR */
{ 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5;get core ID */
{ 0xe7e10050 }, /* ubfx r0, r0, #0, #2 ;extract LSB */
{ 0xe59f5014 }, /* ldr r5, =0x400000CC ;load mbox base */
{ 0xe320f001 }, /* 1: yield */
{ 0xe7953200 }, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core */
{ 0xe3530000 }, /* cmp r3, #0 ;spin while zero */
{ 0x0afffffb }, /* beq 1b */
{ 0xe7853200 }, /* str r3, [r5, r0, lsl #4] ;clear mbox */
{ 0xe12fff13 }, /* bx r3 ;jump to target */
{ 0x400000cc }, /* (constant: mailbox 3 read/clear base) */
{ 0, FIXUP_TERMINATOR }
};
static const uint32_t fixupcontext[FIXUP_MAX] = { 0 };
/* check that we don't overrun board setup vectors */
QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR);
@ -145,9 +148,8 @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info)
QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0
|| (BOARDSETUP_ADDR >> 4) >= 0x100);
rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot),
info->smp_loader_start,
arm_boot_address_space(cpu, info));
arm_write_bootloader("raspi_smpboot", arm_boot_address_space(cpu, info),
info->smp_loader_start, smpboot, fixupcontext);
}
static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info)
@ -161,26 +163,28 @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info)
* the primary CPU goes into the kernel. We put these variables inside
* a rom blob, so that the reset for ROM contents zeroes them for us.
*/
static const uint32_t smpboot[] = {
0xd2801b05, /* mov x5, 0xd8 */
0xd53800a6, /* mrs x6, mpidr_el1 */
0x924004c6, /* and x6, x6, #0x3 */
0xd503205f, /* spin: wfe */
0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */
0xb4ffffc4, /* cbz x4, spin */
0xd2800000, /* mov x0, #0x0 */
0xd2800001, /* mov x1, #0x0 */
0xd2800002, /* mov x2, #0x0 */
0xd2800003, /* mov x3, #0x0 */
0xd61f0080, /* br x4 */
static const ARMInsnFixup smpboot[] = {
{ 0xd2801b05 }, /* mov x5, 0xd8 */
{ 0xd53800a6 }, /* mrs x6, mpidr_el1 */
{ 0x924004c6 }, /* and x6, x6, #0x3 */
{ 0xd503205f }, /* spin: wfe */
{ 0xf86678a4 }, /* ldr x4, [x5,x6,lsl #3] */
{ 0xb4ffffc4 }, /* cbz x4, spin */
{ 0xd2800000 }, /* mov x0, #0x0 */
{ 0xd2800001 }, /* mov x1, #0x0 */
{ 0xd2800002 }, /* mov x2, #0x0 */
{ 0xd2800003 }, /* mov x3, #0x0 */
{ 0xd61f0080 }, /* br x4 */
{ 0, FIXUP_TERMINATOR }
};
static const uint32_t fixupcontext[FIXUP_MAX] = { 0 };
static const uint64_t spintables[] = {
0, 0, 0, 0
};
rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot),
info->smp_loader_start, as);
arm_write_bootloader("raspi_smpboot", as, info->smp_loader_start,
smpboot, fixupcontext);
rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables),
SPINTABLE_ADDR, as);
}

View file

@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
const char *boot_cpu, Error **errp)
{
int i;
int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS,
int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS),
XLNX_ZYNQMP_NUM_RPU_CPUS);
if (num_rpus <= 0) {
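
The added (int) cast is the entire fix: ms->smp.cpus is unsigned, so when it is smaller than XLNX_ZYNQMP_NUM_APU_CPUS the subtraction wraps to a huge positive value, MIN() returns XLNX_ZYNQMP_NUM_RPU_CPUS, and the num_rpus <= 0 guard can never fire. A small demonstration of the wraparound (the constants below are stand-ins):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define NUM_APU_CPUS 4   /* stand-in for XLNX_ZYNQMP_NUM_APU_CPUS */
    #define NUM_RPU_CPUS 2   /* stand-in for XLNX_ZYNQMP_NUM_RPU_CPUS */

    int main(void)
    {
        unsigned int cpus = 1;  /* fewer CPUs than the APU cluster holds */

        /* Unsigned arithmetic: 1 - 4 wraps to 0xfffffffd, so MIN picks 2. */
        int broken = MIN(cpus - NUM_APU_CPUS, NUM_RPU_CPUS);

        /* Cast to int (on a two's-complement host): 1 - 4 is -3, so the
         * <= 0 guard can finally trigger. */
        int fixed = MIN((int)(cpus - NUM_APU_CPUS), NUM_RPU_CPUS);

        printf("broken=%d fixed=%d\n", broken, fixed);  /* broken=2 fixed=-3 */
        return 0;
    }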

View file

@ -42,6 +42,9 @@
GlobalProperty hw_compat_7_1[] = {
{ "virtio-device", "queue_reset", "false" },
{ "virtio-rng-pci", "vectors", "0" },
{ "virtio-rng-pci-transitional", "vectors", "0" },
{ "virtio-rng-pci-non-transitional", "vectors", "0" },
};
const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1);
@ -1326,6 +1329,14 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error *
}
} else if (machine_class->default_ram_id && machine->ram_size &&
numa_uses_legacy_mem()) {
if (object_property_find(object_get_objects_root(),
machine_class->default_ram_id)) {
error_setg(errp, "object name '%s' is reserved for the default"
" RAM backend, it can't be used for any other purposes."
" Change the object's 'id' to something else",
machine_class->default_ram_id);
return;
}
if (!create_default_memdev(current_machine, mem_path, errp)) {
return;
}

View file

@ -498,6 +498,8 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
struct virtio_gpu_resource_flush rf;
struct virtio_gpu_scanout *scanout;
pixman_region16_t flush_region;
bool within_bounds = false;
bool update_submitted = false;
int i;
VIRTIO_GPU_FILL_CMD(rf);
@ -518,13 +520,28 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
rf.r.x < scanout->x + scanout->width &&
rf.r.x + rf.r.width >= scanout->x &&
rf.r.y < scanout->y + scanout->height &&
rf.r.y + rf.r.height >= scanout->y &&
console_has_gl(scanout->con)) {
dpy_gl_update(scanout->con, 0, 0, scanout->width,
scanout->height);
rf.r.y + rf.r.height >= scanout->y) {
within_bounds = true;
if (console_has_gl(scanout->con)) {
dpy_gl_update(scanout->con, 0, 0, scanout->width,
scanout->height);
update_submitted = true;
}
}
}
return;
if (update_submitted) {
return;
}
if (!within_bounds) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
" bounds for flush %d: %d %d %d %d\n",
__func__, rf.resource_id, rf.r.x, rf.r.y,
rf.r.width, rf.r.height);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
}
if (!res->blob &&

View file

@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s)
return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
static inline int stream_halted(struct Stream *s)
{
return !!(s->regs[R_DMASR] & DMASR_HALTED);
}
static void stream_reset(struct Stream *s)
{
s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev,
uint64_t addr;
bool eop;
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
return;
}
@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
unsigned int rxlen;
size_t pos = 0;
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
return 0;
}
@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj,
XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
struct Stream *s = &ds->dma->streams[1];
if (!stream_running(s) || stream_idle(s)) {
if (!stream_running(s) || stream_idle(s) || stream_halted(s)) {
ds->dma->notify = notify;
ds->dma->notify_opaque = notify_opaque;
return false;

View file

@ -123,6 +123,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms)
{
FWCfgState *fw_cfg;
uint64_t val;
const char qemu_version[] = QEMU_VERSION;
fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4);
fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus);
@ -148,6 +149,10 @@ static FWCfgState *create_fw_cfg(MachineState *ms)
fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_config.order[0]);
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
fw_cfg_add_file(fw_cfg, "/etc/qemu-version",
g_memdup(qemu_version, sizeof(qemu_version)),
sizeof(qemu_version));
return fw_cfg;
}
@ -418,10 +423,16 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason)
/* Start all CPUs at the firmware entry point.
* Monarch CPU will initialize firmware, secondary CPUs
* will enter a small idle look and wait for rendevouz. */
* will enter a small idle loop and wait for rendevouz. */
for (i = 0; i < smp_cpus; i++) {
cpu_set_pc(CPU(cpu[i]), firmware_entry);
CPUState *cs = CPU(cpu[i]);
cpu_set_pc(cs, firmware_entry);
cpu[i]->env.psw = PSW_Q;
cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000;
cs->exception_index = -1;
cs->halted = 0;
}
/* already initialized by machine_hppa_init()? */

View file

@ -3179,6 +3179,7 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
/* TODO: add support for VFIO and vhost users */
if (s->snoop_control) {
@ -3186,6 +3187,20 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
"Snoop Control with vhost or VFIO is not supported");
return -ENOTSUP;
}
if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) {
error_setg_errno(errp, ENOTSUP,
"device %02x.%02x.%x requires caching mode",
pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
return -ENOTSUP;
}
if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) {
error_setg_errno(errp, ENOTSUP,
"device %02x.%02x.%x requires device IOTLB mode",
pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
return -ENOTSUP;
}
/* Update per-address-space notifier flags */
vtd_as->notifier_flags = new;

View file

@ -330,7 +330,7 @@ static void microvm_memory_init(MicrovmMachineState *mms)
rom_set_fw(fw_cfg);
if (machine->kernel_filename != NULL) {
x86_load_linux(x86ms, fw_cfg, 0, true, false);
x86_load_linux(x86ms, fw_cfg, 0, true);
}
if (mms->option_roms) {

View file

@ -799,7 +799,7 @@ void xen_load_linux(PCMachineState *pcms)
rom_set_fw(fw_cfg);
x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size,
pcmc->pvh_enabled, pcmc->legacy_no_rng_seed);
pcmc->pvh_enabled);
for (i = 0; i < nb_option_roms; i++) {
assert(!strcmp(option_rom[i].name, "linuxboot.bin") ||
!strcmp(option_rom[i].name, "linuxboot_dma.bin") ||
@ -1119,7 +1119,7 @@ void pc_memory_init(PCMachineState *pcms,
if (linux_boot) {
x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size,
pcmc->pvh_enabled, pcmc->legacy_no_rng_seed);
pcmc->pvh_enabled);
}
for (i = 0; i < nb_option_roms; i++) {

View file

@ -405,6 +405,7 @@ static void pc_xen_hvm_init(MachineState *machine)
}
pc_xen_hvm_init_pci(machine);
xen_igd_reserve_slot(pcms->bus);
pci_create_simple(pcms->bus, -1, "xen-platform");
}
#endif
@ -449,11 +450,9 @@ DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL,
static void pc_i440fx_7_1_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_7_2_machine_options(m);
m->alias = NULL;
m->is_default = false;
pcmc->legacy_no_rng_seed = true;
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
}

View file

@ -383,10 +383,8 @@ DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL,
static void pc_q35_7_1_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_7_2_machine_options(m);
m->alias = NULL;
pcmc->legacy_no_rng_seed = true;
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
}

View file

@ -26,7 +26,6 @@
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qapi-visit-common.h"
@ -37,7 +36,6 @@
#include "sysemu/whpx.h"
#include "sysemu/numa.h"
#include "sysemu/replay.h"
#include "sysemu/reset.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/xen.h"
@ -662,12 +660,12 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state)
return dev;
}
typedef struct SetupData {
struct setup_data {
uint64_t next;
uint32_t type;
uint32_t len;
uint8_t data[];
} __attribute__((packed)) SetupData;
} __attribute__((packed));
/*
@ -774,35 +772,10 @@ static bool load_elfboot(const char *kernel_filename,
return true;
}
typedef struct SetupDataFixup {
void *pos;
hwaddr orig_val, new_val;
uint32_t addr;
} SetupDataFixup;
static void fixup_setup_data(void *opaque)
{
SetupDataFixup *fixup = opaque;
stq_p(fixup->pos, fixup->new_val);
}
static void reset_setup_data(void *opaque)
{
SetupDataFixup *fixup = opaque;
stq_p(fixup->pos, fixup->orig_val);
}
static void reset_rng_seed(void *opaque)
{
SetupData *setup_data = opaque;
qemu_guest_getrandom_nofail(setup_data->data, le32_to_cpu(setup_data->len));
}
void x86_load_linux(X86MachineState *x86ms,
FWCfgState *fw_cfg,
int acpi_data_size,
bool pvh_enabled,
bool legacy_no_rng_seed)
bool pvh_enabled)
{
bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled;
uint16_t protocol;
@ -810,17 +783,16 @@ void x86_load_linux(X86MachineState *x86ms,
int dtb_size, setup_data_offset;
uint32_t initrd_max;
uint8_t header[8192], *setup, *kernel;
hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0, first_setup_data = 0;
hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
FILE *f;
char *vmode;
MachineState *machine = MACHINE(x86ms);
SetupData *setup_data;
struct setup_data *setup_data;
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
const char *dtb_filename = machine->dtb;
const char *kernel_cmdline = machine->kernel_cmdline;
SevKernelLoaderContext sev_load_ctx = {};
enum { RNG_SEED_LENGTH = 32 };
/* Align to 16 bytes as a paranoia measure */
cmdline_size = (strlen(kernel_cmdline) + 16) & ~15;
@ -1097,41 +1069,19 @@ void x86_load_linux(X86MachineState *x86ms,
}
setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
kernel_size = setup_data_offset + sizeof(SetupData) + dtb_size;
kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
kernel = g_realloc(kernel, kernel_size);
stq_p(header + 0x250, prot_addr + setup_data_offset);
setup_data = (SetupData *)(kernel + setup_data_offset);
setup_data->next = cpu_to_le64(first_setup_data);
first_setup_data = prot_addr + setup_data_offset;
setup_data = (struct setup_data *)(kernel + setup_data_offset);
setup_data->next = 0;
setup_data->type = cpu_to_le32(SETUP_DTB);
setup_data->len = cpu_to_le32(dtb_size);
load_image_size(dtb_filename, setup_data->data, dtb_size);
}
if (!legacy_no_rng_seed) {
setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
kernel_size = setup_data_offset + sizeof(SetupData) + RNG_SEED_LENGTH;
kernel = g_realloc(kernel, kernel_size);
setup_data = (SetupData *)(kernel + setup_data_offset);
setup_data->next = cpu_to_le64(first_setup_data);
first_setup_data = prot_addr + setup_data_offset;
setup_data->type = cpu_to_le32(SETUP_RNG_SEED);
setup_data->len = cpu_to_le32(RNG_SEED_LENGTH);
qemu_guest_getrandom_nofail(setup_data->data, RNG_SEED_LENGTH);
qemu_register_reset_nosnapshotload(reset_rng_seed, setup_data);
fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_KERNEL_DATA, reset_rng_seed, NULL,
setup_data, kernel, kernel_size, true);
} else {
fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
}
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
sev_load_ctx.kernel_data = (char *)kernel;
sev_load_ctx.kernel_size = kernel_size;
/*
* If we're starting an encrypted VM, it will be OVMF based, which uses the
* efi stub for booting and doesn't require any values to be placed in the
@ -1140,20 +1090,16 @@ void x86_load_linux(X86MachineState *x86ms,
* file the user passed in.
*/
if (!sev_enabled()) {
SetupDataFixup *fixup = g_malloc(sizeof(*fixup));
memcpy(setup, header, MIN(sizeof(header), setup_size));
/* Offset 0x250 is a pointer to the first setup_data link. */
fixup->pos = setup + 0x250;
fixup->orig_val = ldq_p(fixup->pos);
fixup->new_val = first_setup_data;
fixup->addr = cpu_to_le32(real_addr);
fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_SETUP_ADDR, fixup_setup_data, NULL,
fixup, &fixup->addr, sizeof(fixup->addr), true);
qemu_register_reset(reset_setup_data, fixup);
} else {
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
}
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
sev_load_ctx.kernel_data = (char *)kernel;
sev_load_ctx.kernel_size = kernel_size;
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);
sev_load_ctx.setup_data = (char *)setup;

View file

@ -49,12 +49,9 @@ static void aw_a10_pic_update(AwA10PICState *s)
static void aw_a10_pic_set_irq(void *opaque, int irq, int level)
{
AwA10PICState *s = opaque;
uint32_t *pending_reg = &s->irq_pending[irq / 32];
if (level) {
set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
} else {
clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
}
*pending_reg = deposit32(*pending_reg, irq % 32, 1, !!level);
aw_a10_pic_update(s);
}
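
For reference, deposit32(value, start, length, fieldval) from QEMU's qemu/bitops.h returns value with the length-bit field at bit position start replaced by fieldval, so the rewritten line updates exactly one pending bit in a single read-modify-write instead of the set_bit/clear_bit pair. A self-contained stand-in with the same semantics:

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's deposit32(): replace the 'length'-bit
     * field at bit 'start' of 'value' with 'fieldval'. */
    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint32_t pending = 0x00000010;
        pending = deposit32(pending, 7, 1, 1);  /* raise IRQ 7 -> 0x90 */
        pending = deposit32(pending, 4, 1, 0);  /* lower IRQ 4 -> 0x80 */
        printf("0x%08x\n", pending);
        return 0;
    }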

View file

@ -189,7 +189,7 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
bool acc_mode)
{
struct iovec iov[ASPEED_HACE_MAX_SG];
g_autofree uint8_t *digest_buf;
g_autofree uint8_t *digest_buf = NULL;
size_t digest_len = 0;
int niov = 0;
int i;

View file

@ -350,8 +350,13 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s,
FrameDescriptor *desc,
uint32_t phys_addr)
{
dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc),
uint32_t desc_words[4];
dma_memory_read(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words),
MEMTXATTRS_UNSPECIFIED);
desc->status = le32_to_cpu(desc_words[0]);
desc->status2 = le32_to_cpu(desc_words[1]);
desc->addr = le32_to_cpu(desc_words[2]);
desc->next = le32_to_cpu(desc_words[3]);
}
static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s,
@ -400,10 +405,15 @@ static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s,
}
static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s,
FrameDescriptor *desc,
const FrameDescriptor *desc,
uint32_t phys_addr)
{
dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc),
uint32_t desc_words[4];
desc_words[0] = cpu_to_le32(desc->status);
desc_words[1] = cpu_to_le32(desc->status2);
desc_words[2] = cpu_to_le32(desc->addr);
desc_words[3] = cpu_to_le32(desc->next);
dma_memory_write(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words),
MEMTXATTRS_UNSPECIFIED);
}
@ -638,8 +648,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset,
break;
case REG_TX_CUR_BUF: /* Transmit Current Buffer */
if (s->tx_desc_curr != 0) {
dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc),
MEMTXATTRS_UNSPECIFIED);
allwinner_sun8i_emac_get_desc(s, &desc, s->tx_desc_curr);
value = desc.addr;
} else {
value = 0;
@ -652,8 +661,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset,
break;
case REG_RX_CUR_BUF: /* Receive Current Buffer */
if (s->rx_desc_curr != 0) {
dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc),
MEMTXATTRS_UNSPECIFIED);
allwinner_sun8i_emac_get_desc(s, &desc, s->rx_desc_curr);
value = desc.addr;
} else {
value = 0;
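
This hunk, like the msf2-emac, nvme and allwinner-sdhost changes further down, fixes one recurring pattern: DMA-ing a C struct straight to or from guest memory bakes the host's byte order into a descriptor layout that is little-endian by device definition, which breaks big-endian hosts. Reading 32-bit words and converting via le32_to_cpu/cpu_to_le32 keeps the in-memory format fixed. A standalone sketch of a byte-order-independent little-endian load:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for le32_to_cpu(): decode a little-endian
     * 32-bit value byte by byte, so the result is identical on both
     * little- and big-endian hosts. */
    static uint32_t le32_decode(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        uint8_t wire[4] = { 0x78, 0x56, 0x34, 0x12 };  /* LE 0x12345678 */
        printf("0x%08x\n", le32_decode(wire));         /* same on any host */
        return 0;
    }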

View file

@ -567,7 +567,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
qemu_send_packet(nc, buf, size);
}
inc_tx_bcast_or_mcast_count(s, buf);
e1000x_increase_size_stats(s->mac_reg, PTCregs, size);
e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4);
}
static void
@ -631,10 +631,9 @@ xmit_seg(E1000State *s)
}
e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size);
s->mac_reg[GPTC] = s->mac_reg[TPT];
s->mac_reg[GOTCL] = s->mac_reg[TOTL];
s->mac_reg[GOTCH] = s->mac_reg[TOTH];
e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4);
e1000x_inc_reg_if_not_full(s->mac_reg, GPTC);
e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4);
}
static void
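
The recurring "+ 4" in these e1000 statistics hunks accounts for the Ethernet frame check sequence: as the comment in the e1000x helper below notes, the octet counters (TOTL/TORL, GOTCL/GORCL) and the size-bucketed PTC/PRC registers count frames as seen on the wire, CRC included, while QEMU's buffers carry frames without it.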

View file

@ -687,9 +687,8 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
g_assert_not_reached();
}
core->mac[GPTC] = core->mac[TPT];
core->mac[GOTCL] = core->mac[TOTL];
core->mac[GOTCH] = core->mac[TOTH];
e1000x_inc_reg_if_not_full(core->mac, GPTC);
e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
}
static void

View file

@ -217,15 +217,14 @@ e1000x_update_rx_total_stats(uint32_t *mac,
e1000x_increase_size_stats(mac, PRCregs, data_fcs_size);
e1000x_inc_reg_if_not_full(mac, TPR);
mac[GPRC] = mac[TPR];
e1000x_inc_reg_if_not_full(mac, GPRC);
/* TOR - Total Octets Received:
* This register includes bytes received in a packet from the <Destination
* Address> field through the <CRC> field, inclusively.
* Always include FCS length (4) in size.
*/
e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4);
mac[GORCL] = mac[TORL];
mac[GORCH] = mac[TORH];
e1000x_grow_8reg_if_not_full(mac, GORCL, data_size + 4);
}
void

View file

@ -118,14 +118,18 @@ static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc)
d->next = le32_to_cpu(d->next);
}
static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc)
static void emac_store_desc(MSF2EmacState *s, const EmacDesc *d, hwaddr desc)
{
/* Convert from host endianness into LE. */
d->pktaddr = cpu_to_le32(d->pktaddr);
d->pktsize = cpu_to_le32(d->pktsize);
d->next = cpu_to_le32(d->next);
EmacDesc outd;
/*
* Convert from host endianness into LE. We use a local struct because
* calling code may still want to look at the fields afterwards.
*/
outd.pktaddr = cpu_to_le32(d->pktaddr);
outd.pktsize = cpu_to_le32(d->pktsize);
outd.next = cpu_to_le32(d->next);
address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d);
address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, &outd, sizeof outd);
}
static void msf2_dma_tx(MSF2EmacState *s)

View file

@ -2154,6 +2154,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s)
int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) &
CP_TC_LGSEN_MSS_MASK;
if (large_send_mss == 0) {
goto skip_offload;
}
DPRINTF("+++ C+ mode offloaded task TSO IP data %d "
"frame data %d specified MSS=%d\n",

View file

@ -802,7 +802,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
}
if (!get_vhost_net(nc->peer)) {
virtio_add_feature(&features, VIRTIO_F_RING_RESET);
return features;
}

View file

@ -1441,7 +1441,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
vmxnet3_setup_rx_filtering(s);
/* Cache fields from shared memory */
s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU);
assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu <= VMXNET3_MAX_MTU);
VMW_CFPRN("MTU is %u", s->mtu);
s->max_rx_frags =

View file

@ -1331,10 +1331,23 @@ static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
}
}
static void nvme_update_cq_eventidx(const NvmeCQueue *cq)
{
uint32_t v = cpu_to_le32(cq->head);
//not in 7.2: trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head);
pci_dma_write(PCI_DEVICE(cq->ctrl), cq->ei_addr, &v, sizeof(v));
}
static void nvme_update_cq_head(NvmeCQueue *cq)
{
pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &cq->head,
sizeof(cq->head));
uint32_t v;
pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &v, sizeof(v));
cq->head = le32_to_cpu(v);
trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head);
}
@ -1351,6 +1364,7 @@ static void nvme_post_cqes(void *opaque)
hwaddr addr;
if (n->dbbuf_enabled) {
nvme_update_cq_eventidx(cq);
nvme_update_cq_head(cq);
}
@ -2477,6 +2491,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr,
req);
if (status) {
g_free(iocb->range);
qemu_aio_unref(iocb);
return status;
}
@ -6141,15 +6158,21 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
static void nvme_update_sq_eventidx(const NvmeSQueue *sq)
{
pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &sq->tail,
sizeof(sq->tail));
uint32_t v = cpu_to_le32(sq->tail);
pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &v, sizeof(v));
trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail);
}
static void nvme_update_sq_tail(NvmeSQueue *sq)
{
pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &sq->tail,
sizeof(sq->tail));
uint32_t v;
pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &v, sizeof(v));
sq->tail = le32_to_cpu(v);
trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail);
}

View file

@ -693,12 +693,12 @@ static const VMStateDescription vmstate_fw_cfg = {
}
};
void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only)
static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only)
{
int arch = !!(key & FW_CFG_ARCH_LOCAL);

View file

@ -806,6 +806,8 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
int64_t signed_decr;
/* Truncate value to decr_width and sign extend for simplicity */
value = extract64(value, 0, nr_bits);
decr = extract64(decr, 0, nr_bits);
signed_value = sextract64(value, 0, nr_bits);
signed_decr = sextract64(decr, 0, nr_bits);

View file

@ -271,9 +271,11 @@ static void ibm_40p_init(MachineState *machine)
}
/* PCI -> ISA bridge */
i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378"));
i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378"));
qdev_connect_gpio_out(i82378_dev, 0,
qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT));
qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal);
sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15));
isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0"));

View file

@ -796,6 +796,12 @@ int pvrdma_exec_cmd(PVRDMADev *dev)
dsr_info = &dev->dsr_info;
if (!dsr_info->dsr) {
/* Buggy or malicious guest driver */
rdma_error_report("Exec command without dsr, req or rsp buffers");
goto out;
}
if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
sizeof(struct cmd_handler)) {
rdma_error_report("Unsupported command");

View file

@ -5,8 +5,8 @@ mpqemu_recv_io_error(int cmd, int size, int nfds) "failed to receive %d size %d,
# vfio-user-obj.c
vfu_prop(const char *prop, const char *val) "vfu: setting %s as %s"
vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%u -> 0x%x"
vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%u <- 0x%x"
vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x -> 0x%x"
vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x <- 0x%x"
vfu_dma_register(uint64_t gpa, size_t len) "vfu: registering GPA 0x%"PRIx64", %zu bytes"
vfu_dma_unregister(uint64_t gpa) "vfu: unregistering GPA 0x%"PRIx64""
vfu_bar_register(int i, uint64_t addr, uint64_t size) "vfu: BAR %d: addr 0x%"PRIx64" size 0x%"PRIx64""

View file

@ -207,6 +207,12 @@ int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
{
int64_t nidx = 0;
if (ms->numa_state->num_nodes > ms->smp.cpus) {
error_report("Number of NUMA nodes (%d)"
" cannot exceed the number of available CPUs (%d).",
ms->numa_state->num_nodes, ms->smp.max_cpus);
exit(EXIT_FAILURE);
}
if (ms->numa_state->num_nodes) {
nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
if (ms->numa_state->num_nodes <= nidx) {

View file

@ -1134,15 +1134,24 @@ static void lsi_execute_script(LSIState *s)
uint32_t addr, addr_high;
int opcode;
int insn_processed = 0;
static int reentrancy_level;
reentrancy_level++;
s->istat1 |= LSI_ISTAT1_SRUN;
again:
if (++insn_processed > LSI_MAX_INSN) {
/* Some windows drivers make the device spin waiting for a memory
location to change. If we have been executed a lot of code then
assume this is the case and force an unexpected device disconnect.
This is apparently sufficient to beat the drivers into submission.
*/
/*
* Some windows drivers make the device spin waiting for a memory location
* to change. If we have executed more than LSI_MAX_INSN instructions then
* assume this is the case and force an unexpected device disconnect. This
* is apparently sufficient to beat the drivers into submission.
*
* Another issue (CVE-2023-0330) can occur if the script is programmed to
* trigger itself again and again. Avoid this problem by stopping after
* being called multiple times in a reentrant way (8 is an arbitrary value
* which should be enough for all valid use cases).
*/
if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) {
if (!(s->sien0 & LSI_SIST0_UDC)) {
qemu_log_mask(LOG_GUEST_ERROR,
"lsi_scsi: inf. loop with UDC masked");
@ -1596,6 +1605,8 @@ again:
}
}
trace_lsi_execute_script_stop();
reentrancy_level--;
}
static uint8_t lsi_reg_readb(LSIState *s, int offset)

View file

@ -190,12 +190,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
(r->req.cmd.buf[1] & 0x01)) {
page = r->req.cmd.buf[2];
if (page == 0xb0) {
if (page == 0xb0 && r->buflen >= 8) {
uint8_t buf[16] = {};
uint8_t buf_used = MIN(r->buflen, 16);
uint64_t max_transfer = calculate_max_transfer(s);
stl_be_p(&r->buf[8], max_transfer);
/* Also take care of the opt xfer len. */
stl_be_p(&r->buf[12],
MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
memcpy(buf, r->buf, buf_used);
stl_be_p(&buf[8], max_transfer);
stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12])));
memcpy(r->buf + 8, buf + 8, buf_used - 8);
} else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
/*
* Now we're capable of supplying the VPD Block Limits

View file

@ -302,6 +302,30 @@ static void allwinner_sdhost_auto_stop(AwSdHostState *s)
}
}
static void read_descriptor(AwSdHostState *s, hwaddr desc_addr,
TransferDescriptor *desc)
{
uint32_t desc_words[4];
dma_memory_read(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words),
MEMTXATTRS_UNSPECIFIED);
desc->status = le32_to_cpu(desc_words[0]);
desc->size = le32_to_cpu(desc_words[1]);
desc->addr = le32_to_cpu(desc_words[2]);
desc->next = le32_to_cpu(desc_words[3]);
}
static void write_descriptor(AwSdHostState *s, hwaddr desc_addr,
const TransferDescriptor *desc)
{
uint32_t desc_words[4];
desc_words[0] = cpu_to_le32(desc->status);
desc_words[1] = cpu_to_le32(desc->size);
desc_words[2] = cpu_to_le32(desc->addr);
desc_words[3] = cpu_to_le32(desc->next);
dma_memory_write(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words),
MEMTXATTRS_UNSPECIFIED);
}
static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
hwaddr desc_addr,
TransferDescriptor *desc,
@ -312,9 +336,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
uint32_t num_bytes = max_bytes;
uint8_t buf[1024];
/* Read descriptor */
dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc),
MEMTXATTRS_UNSPECIFIED);
read_descriptor(s, desc_addr, desc);
if (desc->size == 0) {
desc->size = klass->max_desc_size;
} else if (desc->size > klass->max_desc_size) {
@ -356,8 +378,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
/* Clear hold flag and flush descriptor */
desc->status &= ~DESC_STATUS_HOLD;
dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc),
MEMTXATTRS_UNSPECIFIED);
write_descriptor(s, desc_addr, desc);
return num_done;
}

View file

@ -749,14 +749,16 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores;
t->core_enabled = t->core_count;
t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads;
t->thread_count2 = cpu_to_le16(ms->smp.threads);
t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
t->processor_family2 = cpu_to_le16(0x01); /* Other */
if (tbl_len == SMBIOS_TYPE_4_LEN_V30) {
t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
t->thread_count2 = cpu_to_le16(ms->smp.threads);
}
SMBIOS_BUILD_TABLE_POST;
smbios_type4_count++;
}

View file

@ -352,6 +352,16 @@ static const VMStateDescription vmstate_hpet = {
}
};
static void hpet_arm(HPETTimer *t, uint64_t ticks)
{
if (ticks < ns_to_ticks(INT64_MAX / 2)) {
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks));
} else {
timer_del(t->qemu_timer);
}
}
/*
* timer expiration callback
*/
@ -374,13 +384,11 @@ static void hpet_timer(void *opaque)
}
}
diff = hpet_calculate_diff(t, cur_tick);
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
} else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) {
if (t->wrap_flag) {
diff = hpet_calculate_diff(t, cur_tick);
timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
t->wrap_flag = 0;
}
}
@ -407,8 +415,7 @@ static void hpet_set_timer(HPETTimer *t)
t->wrap_flag = 1;
}
}
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
}
static void hpet_del_timer(HPETTimer *t)
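
Rough numbers behind the new guard: ticks_to_ns() multiplies the tick count by the HPET clock period, so a comparator far enough in the future overflows the signed 64-bit nanosecond deadline passed to timer_mod(). INT64_MAX/2 nanoseconds is roughly 146 years, so any tick count at or beyond ns_to_ticks(INT64_MAX / 2) can safely be treated as "never fires" and the QEMU timer deleted, rather than armed with a wrapped (possibly already-past) deadline.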

View file

@ -45,7 +45,12 @@ static uint32_t update_counter(NRF51TimerState *s, int64_t now)
uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns);
s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]);
s->update_counter_ns = now;
/*
* Only advance the sync time to the timestamp of the last tick,
* not all the way to 'now', so we don't lose time if we do
* multiple resyncs in a single tick.
*/
s->update_counter_ns += ticks_to_ns(s, ticks);
return ticks;
}

View file

@ -1214,6 +1214,8 @@ static void ohci_frame_boundary(void *opaque)
/* Increment frame number and take care of endianness. */
ohci->frame_number = (ohci->frame_number + 1) & 0xffff;
hcca.frame = cpu_to_le16(ohci->frame_number);
/* When the HC updates frame number, set pad to 0. Ref OHCI Spec 4.4.1*/
hcca.pad = 0;
if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) {
if (!ohci->done)

View file

@ -663,6 +663,8 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev)
vfio_disable_interrupts(vdev);
vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
/*
* Setting vector notifiers needs to enable route for each vector.
* Deferring to commit the KVM routes once rather than per vector
@ -670,8 +672,6 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev)
*/
vfio_prepare_kvm_msi_virq_batch(vdev);
vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
for (i = 0; i < vdev->nr_vectors; i++) {
@ -3159,7 +3159,9 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
out_deregister:
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
if (vdev->irqchip_change_notifier.notify) {
kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
}
out_teardown:
vfio_teardown_msi(vdev);
vfio_bars_exit(vdev);

View file

@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
*/
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
return svq->num_free;
}
/**
@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -EINVAL;
}
svq->num_free -= ndescs;
svq->desc_state[qemu_head].elem = elem;
svq->desc_state[qemu_head].ndescs = ndescs;
vhost_svq_kick(svq);
@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
svq->num_free += num;
*len = used_elem.len;
return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
@ -522,7 +524,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
int64_t start_us = g_get_monotonic_time();
uint32_t len;
uint32_t len = 0;
do {
if (vhost_svq_more_used(svq)) {
@ -656,6 +658,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vq = vq;
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
driver_size = vhost_svq_driver_area_size(svq);
device_size = vhost_svq_device_area_size(svq);
svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);

View file

@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue {
/* Next head to consume from the device */
uint16_t last_used_idx;
/* Size of SVQ vring free descriptors */
uint16_t num_free;
} VhostShadowVirtqueue;
bool vhost_svq_valid_features(uint64_t features, Error **errp);

View file

@ -16,6 +16,7 @@
#include "trace.h"
#define REALIZE_CONNECTION_RETRIES 3
#define VHOST_NVQS 2
/* Features required from VirtIO */
static const int feature_bits[] = {
@ -198,8 +199,7 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio)
{
virtio_delete_queue(gpio->command_vq);
virtio_delete_queue(gpio->interrupt_vq);
g_free(gpio->vhost_dev.vqs);
gpio->vhost_dev.vqs = NULL;
g_free(gpio->vhost_vqs);
virtio_cleanup(vdev);
vhost_user_cleanup(&gpio->vhost_user);
}
@ -219,6 +219,9 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
vhost_dev_set_config_notifier(vhost_dev, &gpio_ops);
gpio->vhost_user.supports_config = true;
gpio->vhost_dev.nvqs = VHOST_NVQS;
gpio->vhost_dev.vqs = gpio->vhost_vqs;
ret = vhost_dev_init(vhost_dev, &gpio->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
@ -337,10 +340,9 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp)
virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config));
gpio->vhost_dev.nvqs = 2;
gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs);
gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS);
gpio->connected = false;

View file

@ -143,8 +143,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c)
vhost_user_cleanup(&i2c->vhost_user);
virtio_delete_queue(i2c->vq);
virtio_cleanup(vdev);
g_free(i2c->vhost_dev.vqs);
i2c->vhost_dev.vqs = NULL;
}
static int vu_i2c_connect(DeviceState *dev)
@ -228,6 +226,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp)
ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
g_free(i2c->vhost_dev.vqs);
do_vhost_user_cleanup(vdev, i2c);
}
@ -239,10 +238,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserI2C *i2c = VHOST_USER_I2C(dev);
struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs;
/* This will stop vhost backend if appropriate. */
vu_i2c_set_status(vdev, 0);
vhost_dev_cleanup(&i2c->vhost_dev);
g_free(vhost_vqs);
do_vhost_user_cleanup(vdev, i2c);
}

View file

@ -229,6 +229,7 @@ static void vu_rng_device_realize(DeviceState *dev, Error **errp)
return;
vhost_dev_init_failed:
g_free(rng->vhost_dev.vqs);
virtio_delete_queue(rng->req_vq);
virtio_add_queue_failed:
virtio_cleanup(vdev);
@ -239,12 +240,12 @@ static void vu_rng_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserRNG *rng = VHOST_USER_RNG(dev);
struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs;
vu_rng_set_status(vdev, 0);
vhost_dev_cleanup(&rng->vhost_dev);
g_free(rng->vhost_dev.vqs);
rng->vhost_dev.vqs = NULL;
g_free(vhost_vqs);
virtio_delete_queue(rng->req_vq);
virtio_cleanup(vdev);
vhost_user_cleanup(&rng->vhost_user);

View file

@ -305,19 +305,8 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
return 0;
}
struct vhost_user_read_cb_data {
struct vhost_dev *dev;
VhostUserMsg *msg;
GMainLoop *loop;
int ret;
};
static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
gpointer opaque)
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
struct vhost_user_read_cb_data *data = opaque;
struct vhost_dev *dev = data->dev;
VhostUserMsg *msg = data->msg;
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
uint8_t *p = (uint8_t *) msg;
@ -325,8 +314,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
r = vhost_user_read_header(dev, msg);
if (r < 0) {
data->ret = r;
goto end;
return r;
}
/* validate message size is sane */
@ -334,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
error_report("Failed to read msg header."
" Size %d exceeds the maximum %zu.", msg->hdr.size,
VHOST_USER_PAYLOAD_SIZE);
data->ret = -EPROTO;
goto end;
return -EPROTO;
}
if (msg->hdr.size) {
@ -346,84 +333,11 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
int saved_errno = errno;
error_report("Failed to read msg payload."
" Read %d instead of %d.", r, msg->hdr.size);
data->ret = r < 0 ? -saved_errno : -EIO;
goto end;
return r < 0 ? -saved_errno : -EIO;
}
}
end:
g_main_loop_quit(data->loop);
return G_SOURCE_REMOVE;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
gpointer opaque);
/*
* This updates the read handler to use a new event loop context.
* Event sources are removed from the previous context: this ensures
* that events detected in the previous context are purged. They will
* be re-detected and processed in the new context.
*/
static void slave_update_read_handler(struct vhost_dev *dev,
GMainContext *ctxt)
{
struct vhost_user *u = dev->opaque;
if (!u->slave_ioc) {
return;
}
if (u->slave_src) {
g_source_destroy(u->slave_src);
g_source_unref(u->slave_src);
}
u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
G_IO_IN | G_IO_HUP,
slave_read, dev, NULL,
ctxt);
}
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
GMainContext *prev_ctxt = chr->chr->gcontext;
GMainContext *ctxt = g_main_context_new();
GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
struct vhost_user_read_cb_data data = {
.dev = dev,
.loop = loop,
.msg = msg,
.ret = 0
};
/*
* We want to be able to monitor the slave channel fd while waiting
* for chr I/O. This requires an event loop, but we can't nest the
* one to which chr is currently attached: its fd handlers might not
* be prepared for re-entrancy. So we create a new one and switch chr
* to use it.
*/
slave_update_read_handler(dev, ctxt);
qemu_chr_be_update_read_handlers(chr->chr, ctxt);
qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
g_main_loop_run(loop);
/*
* Restore the previous event loop context. This also destroys/recreates
* event sources: this guarantees that all pending events in the original
* context that have been processed by the nested loop are purged.
*/
qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
slave_update_read_handler(dev, NULL);
g_main_loop_unref(loop);
g_main_context_unref(ctxt);
return data.ret;
return 0;
}
static int process_message_reply(struct vhost_dev *dev,
@ -1802,7 +1716,9 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
return -ECONNREFUSED;
}
u->slave_ioc = ioc;
slave_update_read_handler(dev, NULL);
u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
G_IO_IN | G_IO_HUP,
slave_read, dev, NULL, NULL);
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
@ -2108,8 +2024,8 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
} else {
if (virtio_has_feature(protocol_features,
VHOST_USER_PROTOCOL_F_CONFIG)) {
warn_reportf_err(*errp, "vhost-user backend supports "
"VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
warn_report("vhost-user backend supports "
"VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
}
}

View file

@ -707,26 +707,11 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
return ret;
}
static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
{
if (!v->shadow_vqs_enabled) {
return;
}
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
vhost_svq_stop(svq);
}
}
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
int ret;
uint8_t status = 0;
vhost_vdpa_reset_svq(v);
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev, status);
return ret;
@ -1088,6 +1073,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
vhost_svq_stop(svq);
vhost_vdpa_svq_unmap_rings(dev, svq);
}
}

View file

@ -20,6 +20,7 @@
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
}
}
static bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
VirtIODevice *vdev = dev->vdev;
/*
* For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
* incremental memory mapping API via the IOTLB API. For platforms that
* do not have an IOMMU, there is no need to enable this feature
* which may cause unnecessary IOTLB miss/update transactions.
*/
if (vdev) {
return virtio_bus_device_iommu_enabled(vdev) &&
virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
} else {
return false;
}
}
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
MemoryRegionSection *section,
hwaddr first,
@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
continue;
}
vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
range_get_last(vq->used_phys, vq->used_size));
if (vhost_dev_has_iommu(dev)) {
IOMMUTLBEntry iotlb;
hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
hwaddr phys, s, offset;
while (used_size) {
rcu_read_lock();
iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
used_phys,
true,
MEMTXATTRS_UNSPECIFIED);
rcu_read_unlock();
if (!iotlb.target_as) {
qemu_log_mask(LOG_GUEST_ERROR, "translation "
"failure for used_iova %"PRIx64"\n",
used_phys);
return -EINVAL;
}
offset = used_phys & iotlb.addr_mask;
phys = iotlb.translated_addr + offset;
/*
* Distance from the start of the used ring to the last byte of
* the IOMMU page.
*/
s = iotlb.addr_mask - offset;
/*
* Size of the used ring, or of the part of it up to the end
* of the IOMMU page. To avoid a zero result, do the addition
* outside of MIN().
*/
s = MIN(s, used_size - 1) + 1;
vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
range_get_last(phys, s));
used_size -= s;
used_phys += s;
}
} else {
vhost_dev_sync_region(dev, section, start_addr,
end_addr, vq->used_phys,
range_get_last(vq->used_phys, vq->used_size));
}
}
return 0;
}
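
The clamping arithmetic in the loop above is subtle; here is a self-contained sketch of just that part, with a fake identity-mapped 4 KiB IOMMU page (the addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK 0xfffULL   /* addr_mask for a 4 KiB IOMMU page */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t used_phys = 0x1ff0, used_size = 0x40; /* crosses a page */

    while (used_size) {
        uint64_t offset = used_phys & PAGE_MASK;
        /* Distance from the current position to the page end. */
        uint64_t s = PAGE_MASK - offset;
        /* Clamp to what is left of the ring; adding 1 outside MIN()
         * keeps the chunk non-zero even when offset == PAGE_MASK. */
        s = MIN(s, used_size - 1) + 1;
        printf("sync %#llx..%#llx\n",
               (unsigned long long)used_phys,
               (unsigned long long)(used_phys + s - 1));
        used_size -= s;
        used_phys += s;
    }
    /* prints: sync 0x1ff0..0x1fff, then sync 0x2000..0x202f */
    return 0;
}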
@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
dev->log_size = size;
}
static bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
VirtIODevice *vdev = dev->vdev;
/*
* For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
* incremental memory mapping API via the IOTLB API. For platforms that
* do not have an IOMMU, there is no need to enable this feature
* which may cause unnecessary IOTLB miss/update transactions.
*/
if (vdev) {
return virtio_bus_device_iommu_enabled(vdev) &&
virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
} else {
return false;
}
}
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
hwaddr *plen, bool is_write)
{
@ -1890,6 +1934,9 @@ fail_vq:
}
fail_mem:
if (vhost_dev_has_iommu(hdev)) {
memory_listener_unregister(&hdev->iommu_listener);
}
fail_features:
vdev->vhost_started = false;
hdev->started = false;

View file

@ -476,15 +476,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
size_t max_len;
CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;
max_len = op_info->iv_len +
op_info->aad_len +
op_info->src_len +
op_info->dst_len +
op_info->digest_result_len;
if (op_info) {
max_len = op_info->iv_len +
op_info->aad_len +
op_info->src_len +
op_info->dst_len +
op_info->digest_result_len;
/* Zeroize and free request data structure */
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
/* Zeroize and free request data structure */
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
}
} else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
if (op_info) {

View file

@ -235,7 +235,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
uint64_t offset, size;
int ret = 0;
first_bit = s->offset_within_region / vmem->bitmap_size;
first_bit = s->offset_within_region / vmem->block_size;
first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
while (first_bit < vmem->bitmap_size) {
MemoryRegionSection tmp = *s;
@ -267,7 +267,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem,
uint64_t offset, size;
int ret = 0;
first_bit = s->offset_within_region / vmem->bitmap_size;
first_bit = s->offset_within_region / vmem->block_size;
first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
while (first_bit < vmem->bitmap_size) {
MemoryRegionSection tmp = *s;
@ -341,7 +341,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
if (ret) {
/* Notify all already-notified listeners. */
QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
MemoryRegionSection tmp = *rdl->section;
MemoryRegionSection tmp = *rdl2->section;
if (rdl2 == rdl) {
break;

View file

@ -1478,7 +1478,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
unsigned int max, idx;
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
int64_t len = 0;
@ -1487,13 +1487,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
max = vq->vring.num;
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int num_bufs;
VRingDesc desc;
unsigned int i;
unsigned int max = vq->vring.num;
num_bufs = total_bufs;
@ -1615,7 +1614,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
unsigned int max, idx;
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache *desc_cache;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
@ -1627,14 +1626,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
wrap_counter = vq->last_avail_wrap_counter;
total_bufs = in_total = out_total = 0;
max = vq->vring.num;
for (;;) {
unsigned int num_bufs = total_bufs;
unsigned int i = idx;
int rc;
unsigned int max = vq->vring.num;
desc_cache = &caches->desc;
vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
if (!is_desc_avail(desc.flags, wrap_counter)) {
break;

View file

@ -57,6 +57,7 @@
#include <sys/ioctl.h>
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/xen/xen.h"
@ -780,15 +781,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp)
s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
s->dev.devfn);
xen_host_pci_device_get(&s->real_device,
s->hostaddr.domain, s->hostaddr.bus,
s->hostaddr.slot, s->hostaddr.function,
errp);
if (*errp) {
error_append_hint(errp, "Failed to \"open\" the real pci device");
return;
}
s->is_virtfn = s->real_device.is_virtfn;
if (s->is_virtfn) {
XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
@ -803,8 +795,10 @@ static void xen_pt_realize(PCIDevice *d, Error **errp)
s->io_listener = xen_pt_io_listener;
/* Setup VGA bios for passthrough GFX */
if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
(s->real_device.dev == 2) && (s->real_device.func == 0)) {
if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) &&
(s->real_device.bus == XEN_PCI_IGD_BUS) &&
(s->real_device.dev == XEN_PCI_IGD_DEV) &&
(s->real_device.func == XEN_PCI_IGD_FN)) {
if (!is_igd_vga_passthrough(&s->real_device)) {
error_setg(errp, "Need to enable igd-passthru if you're trying"
" to passthrough IGD GFX");
@ -950,11 +944,58 @@ static void xen_pci_passthrough_instance_init(Object *obj)
PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
void xen_igd_reserve_slot(PCIBus *pci_bus)
{
if (!xen_igd_gfx_pt_enabled()) {
return;
}
XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n");
pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK;
}
static void xen_igd_clear_slot(DeviceState *qdev, Error **errp)
{
ERRP_GUARD();
PCIDevice *pci_dev = (PCIDevice *)qdev;
XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev);
XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s);
PCIBus *pci_bus = pci_get_bus(pci_dev);
xen_host_pci_device_get(&s->real_device,
s->hostaddr.domain, s->hostaddr.bus,
s->hostaddr.slot, s->hostaddr.function,
errp);
if (*errp) {
error_append_hint(errp, "Failed to \"open\" the real pci device");
return;
}
if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) {
xpdc->pci_qdev_realize(qdev, errp);
return;
}
if (is_igd_vga_passthrough(&s->real_device) &&
s->real_device.domain == XEN_PCI_IGD_DOMAIN &&
s->real_device.bus == XEN_PCI_IGD_BUS &&
s->real_device.dev == XEN_PCI_IGD_DEV &&
s->real_device.func == XEN_PCI_IGD_FN &&
s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) {
pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK;
XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n");
}
xpdc->pci_qdev_realize(qdev, errp);
}
static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass);
xpdc->pci_qdev_realize = dc->realize;
dc->realize = xen_igd_clear_slot;
k->realize = xen_pt_realize;
k->exit = xen_pt_unregister_device;
k->config_read = xen_pt_pci_read_config;
@ -977,6 +1018,7 @@ static const TypeInfo xen_pci_passthrough_info = {
.instance_size = sizeof(XenPCIPassthroughState),
.instance_finalize = xen_pci_passthrough_finalize,
.class_init = xen_pci_passthrough_class_init,
.class_size = sizeof(XenPTDeviceClass),
.instance_init = xen_pci_passthrough_instance_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },

View file

@ -41,7 +41,20 @@ typedef struct XenPTReg XenPTReg;
#define TYPE_XEN_PT_DEVICE "xen-pci-passthrough"
OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE)
#define XEN_PT_DEVICE_CLASS(klass) \
OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE)
#define XEN_PT_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE)
typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp);
typedef struct XenPTDeviceClass {
PCIDeviceClass parent_class;
XenPTQdevRealize pci_qdev_realize;
} XenPTDeviceClass;
uint32_t igd_read_opregion(XenPCIPassthroughState *s);
void xen_igd_reserve_slot(PCIBus *pci_bus);
void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
XenHostPCIDevice *dev);
@ -76,6 +89,13 @@ typedef int (*xen_pt_conf_byte_read)
#define XEN_PCI_INTEL_OPREGION 0xfc
#define XEN_PCI_IGD_DOMAIN 0
#define XEN_PCI_IGD_BUS 0
#define XEN_PCI_IGD_DEV 2
#define XEN_PCI_IGD_FN 0
#define XEN_PCI_IGD_SLOT_MASK \
(1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN)))
typedef enum {
XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */
XEN_PT_GRP_TYPE_EMU, /* emul reg group */

View file

@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp)
error_setg(errp, "Xen PCI passthrough support not built in");
}
}
void xen_igd_reserve_slot(PCIBus *pci_bus)
{
}
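
For reference, a standalone sketch of the slot-mask arithmetic behind XEN_PCI_IGD_SLOT_MASK; the PCI_DEVFN/PCI_SLOT macros below mirror the usual PCI encoding (slot in bits 7..3, function in bits 2..0):

#include <stdio.h>

#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)

int main(void)
{
    unsigned devfn = PCI_DEVFN(2, 0);          /* IGD at 00:02.0 */
    unsigned long mask = 1UL << PCI_SLOT(devfn);

    /* devfn=0x10, slot_mask=0x4: bit 2 reserves slot 2 on the bus. */
    printf("devfn=%#x slot_mask=%#lx\n", devfn, mask);
    return 0;
}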

View file

@ -183,4 +183,53 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
const struct arm_boot_info *info,
hwaddr mvbar_addr);
typedef enum {
FIXUP_NONE = 0, /* do nothing */
FIXUP_TERMINATOR, /* end of insns */
FIXUP_BOARDID, /* overwrite with board ID number */
FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */
FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */
FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */
FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */
FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */
FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */
FIXUP_BOOTREG, /* overwrite with boot register address */
FIXUP_DSB, /* overwrite with correct DSB insn for cpu */
FIXUP_MAX,
} FixupType;
typedef struct ARMInsnFixup {
uint32_t insn;
FixupType fixup;
} ARMInsnFixup;
/**
* arm_write_bootloader - write a bootloader to guest memory
* @name: name of the bootloader blob
* @as: AddressSpace to write the bootloader
* @addr: guest address to write it
* @insns: the blob to be loaded
* @fixupcontext: context to be used for any fixups in @insns
*
* Write a bootloader to guest memory at address @addr in the address
* space @as. @name is the name to use for the resulting ROM blob, so
* it should be unique in the system and reasonably identifiable for debugging.
*
* @insns must be an array of ARMInsnFixup structs, each of which has
* one 32-bit value to be written to the guest memory, and a fixup to be
* applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively
* the fixup is optional when writing a struct initializer.
* The final entry in the array must be { 0, FIXUP_TERMINATOR }.
*
* All other supported fixup types have the semantics "ignore insn
* and instead use the value from the array element @fixupcontext[fixup]".
* The caller should therefore provide @fixupcontext as an array of
* size FIXUP_MAX whose elements have been initialized for at least
* the entries that @insns refers to.
*/
void arm_write_bootloader(const char *name,
AddressSpace *as, hwaddr addr,
const ARMInsnFixup *insns,
const uint32_t *fixupcontext);
#endif /* HW_ARM_BOOT_H */
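
A standalone illustration of the fixup-table contract documented above. This is a toy re-implementation for exposition, not the arm_write_bootloader code; the ldr encoding and entry-point value are made up:

#include <stdint.h>
#include <stdio.h>

typedef enum { FIXUP_NONE = 0, FIXUP_TERMINATOR,
               FIXUP_ENTRYPOINT_LO, FIXUP_MAX } FixupType;
typedef struct { uint32_t insn; FixupType fixup; } InsnFixup;

int main(void)
{
    const InsnFixup blob[] = {
        { 0xe59f0000 },              /* FIXUP_NONE: word kept as-is   */
        { 0, FIXUP_ENTRYPOINT_LO },  /* replaced from fixupcontext    */
        { 0, FIXUP_TERMINATOR },     /* mandatory final entry         */
    };
    uint32_t ctx[FIXUP_MAX] = { [FIXUP_ENTRYPOINT_LO] = 0x40080000 };

    for (int i = 0;
         !(blob[i].insn == 0 && blob[i].fixup == FIXUP_TERMINATOR); i++) {
        uint32_t v = blob[i].fixup == FIXUP_NONE ? blob[i].insn
                                                 : ctx[blob[i].fixup];
        printf("word %d: 0x%08x\n", i, v);
    }
    return 0;
}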

View file

@ -128,9 +128,6 @@ struct PCMachineClass {
/* create kvmclock device even when KVM PV features are not exposed */
bool kvmclock_create_always;
/* skip passing an rng seed for legacy machines */
bool legacy_no_rng_seed;
};
#define TYPE_PC_MACHINE "generic-pc-machine"

View file

@ -126,8 +126,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
void x86_load_linux(X86MachineState *x86ms,
FWCfgState *fw_cfg,
int acpi_data_size,
bool pvh_enabled,
bool legacy_no_rng_seed);
bool pvh_enabled);
bool x86_machine_is_smm_enabled(const X86MachineState *x86ms);
bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms);

View file

@ -117,28 +117,6 @@ struct FWCfgMemState {
*/
void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len);
/**
* fw_cfg_add_bytes_callback:
* @s: fw_cfg device being modified
* @key: selector key value for new fw_cfg item
* @select_cb: callback function when selecting
* @write_cb: callback function after a write
* @callback_opaque: argument to be passed into callback function
* @data: pointer to start of item data
* @len: size of item data
* @read_only: is file read only
*
* Add a new fw_cfg item, available by selecting the given key, as a raw
* "blob" of the given size. The data referenced by the starting pointer
* is only linked, NOT copied, into the data structure of the fw_cfg device.
*/
void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only);
/**
* fw_cfg_add_string:
* @s: fw_cfg device being modified

View file

@ -23,7 +23,7 @@ struct VHostUserGPIO {
VirtIODevice parent_obj;
CharBackend chardev;
struct virtio_gpio_config config;
struct vhost_virtqueue *vhost_vq;
struct vhost_virtqueue *vhost_vqs;
struct vhost_dev vhost_dev;
VhostUserState vhost_user;
VirtQueue *command_vq;

View file

@ -188,4 +188,17 @@
#define QEMU_DISABLE_CFI
#endif
/*
* Apple clang version 14 has a bug in its __builtin_subcll(); define
* BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it.
* When a version of Apple clang which has this bug fixed is released
* we can add an upper bound to this check.
* See https://gitlab.com/qemu-project/qemu/-/issues/1631
* and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details.
* The bug never made it into any upstream LLVM releases, only Apple ones.
*/
#if defined(__apple_build_version__) && __clang_major__ >= 14
#define BUILTIN_SUBCLL_BROKEN
#endif
#endif /* COMPILER_H */

View file

@ -596,7 +596,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
*/
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll)
#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
unsigned long long b = *pborrow;
x = __builtin_subcll(x, y, b, &b);
*pborrow = b & 1;

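When BUILTIN_SUBCLL_BROKEN (or a missing builtin) forces the fallback path, the required semantics are a borrow-propagating 64-bit subtract. A sketch of one portable way to express that contract, not necessarily the exact QEMU fallback:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t usub64_borrow_portable(uint64_t x, uint64_t y, bool *pborrow)
{
    uint64_t b = *pborrow;
    uint64_t d = x - y - b;          /* wraps modulo 2^64 by definition */
    /* Borrow out iff we subtracted more than x held. */
    *pborrow = (x < y) || (x == y && b);
    return d;
}

int main(void)
{
    bool b = false;
    assert(usub64_borrow_portable(0, 1, &b) == UINT64_MAX && b);
    assert(usub64_borrow_portable(5, 2, &b) == 2 && !b);  /* 5 - 2 - 1 */
    return 0;
}
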
View file

@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master,
ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS));
ioc->master = master;
if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
}
object_ref(OBJECT(master));
ioc->session = qcrypto_tls_session_new(

View file

@ -1622,7 +1622,7 @@ TargetFdTrans target_signalfd_trans = {
.host_to_target_data = host_to_target_data_signalfd,
};
static abi_long swap_data_eventfd(void *buf, size_t len)
static abi_long swap_data_u64(void *buf, size_t len)
{
uint64_t *counter = buf;
int i;
@ -1640,8 +1640,12 @@ static abi_long swap_data_eventfd(void *buf, size_t len)
}
TargetFdTrans target_eventfd_trans = {
.host_to_target_data = swap_data_eventfd,
.target_to_host_data = swap_data_eventfd,
.host_to_target_data = swap_data_u64,
.target_to_host_data = swap_data_u64,
};
TargetFdTrans target_timerfd_trans = {
.host_to_target_data = swap_data_u64,
};
#if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \

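A minimal standalone version of the idea behind swap_data_u64: both eventfd and timerfd transfer arrays of native-endian 8-byte counters, so cross-endian emulation must byte-swap each one. Error handling and the swap helper are simplified relative to QEMU (__builtin_bswap64 stands in for tswap64):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int64_t swap_data_u64_demo(void *buf, size_t len)
{
    uint64_t *counter = buf;

    if (len < sizeof(uint64_t)) {
        return -EINVAL;              /* short read can't hold a counter */
    }
    for (size_t i = 0; i < len / sizeof(uint64_t); i++) {
        counter[i] = __builtin_bswap64(counter[i]);
    }
    return len;
}

int main(void)
{
    uint64_t expirations = 3;        /* what read(2) on a timerfd yields */
    swap_data_u64_demo(&expirations, sizeof(expirations));
    printf("swapped: %016llx\n", (unsigned long long)expirations);
    return 0;
}
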
View file

@ -130,6 +130,7 @@ extern TargetFdTrans target_netlink_route_trans;
extern TargetFdTrans target_netlink_audit_trans;
extern TargetFdTrans target_signalfd_trans;
extern TargetFdTrans target_eventfd_trans;
extern TargetFdTrans target_timerfd_trans;
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
(defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
defined(__NR_inotify_init1))

View file

@ -12,8 +12,8 @@ struct target_rlimit {
};
struct target_rlimit64 {
uint64_t rlim_cur;
uint64_t rlim_max;
abi_ullong rlim_cur;
abi_ullong rlim_max;
};
#define TARGET_RLIM_INFINITY ((abi_ulong)-1)

View file

@ -290,7 +290,10 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
env->CP0_Status |= (1 << CP0St_FR);
env->hflags |= MIPS_HFLAG_F64;
}
} else if (!prog_req.fre && !prog_req.frdefault &&
} else if (prog_req.fr1) {
env->CP0_Status |= (1 << CP0St_FR);
env->hflags |= MIPS_HFLAG_F64;
} else if (!prog_req.fre && !prog_req.frdefault &&
!prog_req.fr1 && !prog_req.single && !prog_req.soft) {
fprintf(stderr, "qemu: Can't find a matching FPU mode\n");
exit(1);

View file

@ -86,6 +86,15 @@ void cpu_loop(CPUS390XState *env)
} else if (ret != -QEMU_ESIGRETURN) {
env->regs[2] = ret;
}
if (unlikely(cs->singlestep_enabled)) {
/*
* cpu_tb_exec() did not raise EXCP_DEBUG, because it has seen
* that EXCP_SVC was already pending.
*/
cs->exception_index = EXCP_DEBUG;
}
break;
case EXCP_DEBUG:

View file

@ -1755,6 +1755,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
lladdr = (struct target_sockaddr_ll *)addr;
lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
} else if (sa_family == AF_INET6) {
struct sockaddr_in6 *in6addr;
in6addr = (struct sockaddr_in6 *)addr;
in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
}
unlock_user(target_saddr, target_addr, 0);
@ -11433,39 +11438,58 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
{
int gidsetsize = arg1;
target_id *target_grouplist;
gid_t *grouplist;
g_autofree gid_t *grouplist = NULL;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
ret = get_errno(getgroups(gidsetsize, grouplist));
if (gidsetsize == 0)
return ret;
if (!is_error(ret)) {
target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
if (!target_grouplist)
return -TARGET_EFAULT;
for(i = 0;i < ret; i++)
target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
if (gidsetsize > NGROUPS_MAX) {
return -TARGET_EINVAL;
}
if (gidsetsize > 0) {
grouplist = g_try_new(gid_t, gidsetsize);
if (!grouplist) {
return -TARGET_ENOMEM;
}
}
ret = get_errno(getgroups(gidsetsize, grouplist));
if (!is_error(ret) && gidsetsize > 0) {
target_grouplist = lock_user(VERIFY_WRITE, arg2,
gidsetsize * sizeof(target_id), 0);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
for (i = 0; i < ret; i++) {
target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
}
unlock_user(target_grouplist, arg2,
gidsetsize * sizeof(target_id));
}
return ret;
}
return ret;
case TARGET_NR_setgroups:
{
int gidsetsize = arg1;
target_id *target_grouplist;
gid_t *grouplist = NULL;
g_autofree gid_t *grouplist = NULL;
int i;
if (gidsetsize) {
grouplist = alloca(gidsetsize * sizeof(gid_t));
target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
return -TARGET_EINVAL;
}
if (gidsetsize > 0) {
grouplist = g_try_new(gid_t, gidsetsize);
if (!grouplist) {
return -TARGET_ENOMEM;
}
target_grouplist = lock_user(VERIFY_READ, arg2,
gidsetsize * sizeof(target_id), 1);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
for (i = 0; i < gidsetsize; i++) {
grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
}
unlock_user(target_grouplist, arg2, 0);
unlock_user(target_grouplist, arg2,
gidsetsize * sizeof(target_id));
}
return get_errno(setgroups(gidsetsize, grouplist));
}
@ -11750,41 +11774,59 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
{
int gidsetsize = arg1;
uint32_t *target_grouplist;
gid_t *grouplist;
g_autofree gid_t *grouplist = NULL;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
if (gidsetsize > NGROUPS_MAX) {
return -TARGET_EINVAL;
}
if (gidsetsize > 0) {
grouplist = g_try_new(gid_t, gidsetsize);
if (!grouplist) {
return -TARGET_ENOMEM;
}
}
ret = get_errno(getgroups(gidsetsize, grouplist));
if (gidsetsize == 0)
return ret;
if (!is_error(ret)) {
target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
if (!is_error(ret) && gidsetsize > 0) {
target_grouplist = lock_user(VERIFY_WRITE, arg2,
gidsetsize * 4, 0);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
for(i = 0;i < ret; i++)
for (i = 0; i < ret; i++) {
target_grouplist[i] = tswap32(grouplist[i]);
}
unlock_user(target_grouplist, arg2, gidsetsize * 4);
}
return ret;
}
return ret;
#endif
#ifdef TARGET_NR_setgroups32
case TARGET_NR_setgroups32:
{
int gidsetsize = arg1;
uint32_t *target_grouplist;
gid_t *grouplist;
g_autofree gid_t *grouplist = NULL;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
if (!target_grouplist) {
return -TARGET_EFAULT;
if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
return -TARGET_EINVAL;
}
if (gidsetsize > 0) {
grouplist = g_try_new(gid_t, gidsetsize);
if (!grouplist) {
return -TARGET_ENOMEM;
}
target_grouplist = lock_user(VERIFY_READ, arg2,
gidsetsize * 4, 1);
if (!target_grouplist) {
return -TARGET_EFAULT;
}
for (i = 0; i < gidsetsize; i++) {
grouplist[i] = tswap32(target_grouplist[i]);
}
unlock_user(target_grouplist, arg2, 0);
}
for(i = 0;i < gidsetsize; i++)
grouplist[i] = tswap32(target_grouplist[i]);
unlock_user(target_grouplist, arg2, 0);
return get_errno(setgroups(gidsetsize, grouplist));
}
#endif
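
The get/setgroups hunks above all apply the same hardening pattern: validate the caller-controlled count before allocating, and prefer a fallible heap allocation over alloca() so huge sizes fail cleanly instead of smashing the stack. A standalone sketch, with calloc() standing in for g_try_new():

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#ifndef NGROUPS_MAX
#define NGROUPS_MAX 65536            /* Linux value; used as fallback */
#endif

static int alloc_groups(int gidsetsize, unsigned **out)
{
    unsigned *grouplist = NULL;

    if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
        return -EINVAL;              /* reject absurd sizes up front */
    }
    if (gidsetsize > 0) {
        grouplist = calloc(gidsetsize, sizeof(*grouplist));
        if (!grouplist) {
            return -ENOMEM;          /* alloca() could never report this */
        }
    }
    *out = grouplist;                /* caller frees */
    return 0;
}

int main(void)
{
    unsigned *g = NULL;
    int r = alloc_groups(8, &g);
    free(g);
    return r;
}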
@ -12883,8 +12925,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
return -TARGET_EFAULT;
}
rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
rnew.rlim_max = tswap64(target_rnew->rlim_max);
__get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
__get_user(rnew.rlim_max, &target_rnew->rlim_max);
unlock_user_struct(target_rnew, arg3, 0);
rnewp = &rnew;
}
@ -12894,8 +12936,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
return -TARGET_EFAULT;
}
target_rold->rlim_cur = tswap64(rold.rlim_cur);
target_rold->rlim_max = tswap64(rold.rlim_max);
__put_user(rold.rlim_cur, &target_rold->rlim_cur);
__put_user(rold.rlim_max, &target_rold->rlim_max);
unlock_user_struct(target_rold, arg4, 1);
}
return ret;
@ -13115,8 +13157,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_create:
return get_errno(timerfd_create(arg1,
target_to_host_bitmask(arg2, fcntl_flags_tbl)));
ret = get_errno(timerfd_create(arg1,
target_to_host_bitmask(arg2, fcntl_flags_tbl)));
if (ret >= 0) {
fd_trans_register(ret, &target_timerfd_trans);
}
return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)

View file

@ -2831,7 +2831,7 @@ config_host_data.set('CONFIG_SLIRP', slirp.found())
genh += configure_file(output: 'config-host.h', configuration: config_host_data)
hxtool = find_program('scripts/hxtool')
shaderinclude = find_program('scripts/shaderinclude.pl')
shaderinclude = find_program('scripts/shaderinclude.py')
qapi_gen = find_program('scripts/qapi-gen.py')
qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py',
meson.current_source_dir() / 'scripts/qapi/commands.py',
@ -3245,6 +3245,10 @@ modinfo_files = []
block_mods = []
softmmu_mods = []
foreach d, list : modules
if not (d == 'block' ? have_block : have_system)
continue
endif
foreach m, module_ss : list
if enable_modules and targetos != 'windows'
module_ss = module_ss.apply(config_all, strict: false)

View file

@ -3320,7 +3320,6 @@ static void migration_completion(MigrationState *s)
ret = global_state_store();
if (!ret) {
bool inactivate = !migrate_colo_enabled();
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
trace_migration_completion_vm_stop(ret);
if (ret >= 0) {
@ -3328,12 +3327,15 @@ static void migration_completion(MigrationState *s)
MIGRATION_STATUS_DEVICE);
}
if (ret >= 0) {
/*
* Inactivate disks except in COLO, and track that we
* have done so in order to remember to reactivate
* them if migration fails or is cancelled.
*/
s->block_inactive = !migrate_colo_enabled();
qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
inactivate);
}
if (inactivate && ret >= 0) {
s->block_inactive = true;
s->block_inactive);
}
}
qemu_mutex_unlock_iothread();
@ -3370,13 +3372,13 @@ static void migration_completion(MigrationState *s)
rp_error = await_return_path_close_on_source(s);
trace_migration_return_path_end_after(rp_error);
if (rp_error) {
goto fail_invalidate;
goto fail;
}
}
if (qemu_file_get_error(s->to_dst_file)) {
trace_migration_completion_file_err();
goto fail_invalidate;
goto fail;
}
if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
@ -3390,12 +3392,13 @@ static void migration_completion(MigrationState *s)
return;
fail_invalidate:
/* If not doing postcopy, vm_start() will be called: let's regain
* control on images.
*/
if (s->state == MIGRATION_STATUS_ACTIVE ||
s->state == MIGRATION_STATUS_DEVICE) {
fail:
if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
s->state == MIGRATION_STATUS_DEVICE)) {
/*
* If not doing postcopy, vm_start() will be called: let's
* regain control on images.
*/
Error *local_err = NULL;
qemu_mutex_lock_iothread();
@ -3408,7 +3411,6 @@ fail_invalidate:
qemu_mutex_unlock_iothread();
}
fail:
migrate_set_state(&s->state, current_active_state,
MIGRATION_STATUS_FAILED);
}

View file

@ -1765,13 +1765,15 @@ out:
static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
ram_addr_t size)
{
const ram_addr_t end = offset + size;
/*
* We read one byte of each page; this will preallocate page tables if
* required and populate the shared zeropage on MAP_PRIVATE anonymous memory
* where no page was populated yet. This might require adaptation when
* supporting other mappings, like shmem.
*/
for (; offset < size; offset += block->page_size) {
for (; offset < end; offset += block->page_size) {
char tmp = *((char *)block->host + offset);
/* Don't optimize the read out */
@ -1885,13 +1887,14 @@ int ram_write_tracking_start(void)
block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
goto fail;
}
block->flags |= RAM_UF_WRITEPROTECT;
memory_region_ref(block->mr);
/* Apply UFFD write protection to the block memory range */
if (uffd_change_protection(rs->uffdio_fd, block->host,
block->max_length, true, false)) {
goto fail;
}
block->flags |= RAM_UF_WRITEPROTECT;
memory_region_ref(block->mr);
trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
block->host, block->max_length);
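
The one-line fix in populate_read_range() above is easy to misread; a standalone demonstration of the difference between the buggy bound (offset < size) and the fixed bound (offset < offset + size), assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE 4096u

static unsigned pages_visited(uint64_t offset, uint64_t size, int fixed)
{
    const uint64_t end = offset + size;
    unsigned n = 0;

    for (; fixed ? offset < end : offset < size; offset += PAGE) {
        n++;                 /* one touch per page, as in the original */
    }
    return n;
}

int main(void)
{
    /* 4 pages starting at 16 KiB: the buggy bound visits none of them. */
    printf("buggy=%u fixed=%u\n",
           pages_visited(4 * PAGE, 4 * PAGE, 0),
           pages_visited(4 * PAGE, 4 * PAGE, 1));   /* buggy=0 fixed=4 */
    return 0;
}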

View file

@ -49,6 +49,7 @@ const int vdpa_feature_bits[] = {
VIRTIO_F_VERSION_1,
VIRTIO_NET_F_CSUM,
VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_F_GSO,
VIRTIO_NET_F_GUEST_TSO4,
VIRTIO_NET_F_GUEST_TSO6,
@ -160,6 +161,14 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
struct vhost_dev *dev = &s->vhost_net->dev;
/*
* If a peer NIC is attached, do not clean up anything.
* Cleanup will happen as part of qemu_cleanup() -> net_cleanup()
* when the guest is shutting down.
*/
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
return;
}
qemu_vfree(s->cvq_cmd_out_buffer);
qemu_vfree(s->status);
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
@ -500,7 +509,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
}
if (*s->status != VIRTIO_NET_OK) {
return VIRTIO_NET_ERR;
goto out;
}
status = VIRTIO_NET_ERR;

Binary file not shown.

View file

@ -1,5 +1,5 @@
keymaps = {
'ar': '-l ar',
'ar': '-l ara',
'bepo': '-l fr -v dvorak',
'cz': '-l cz',
'da': '-l dk',

View file

@ -1140,10 +1140,22 @@ have gone through several iterations as the feature set and complexity
of the block layer have grown. Many online guides to QEMU reference
older and deprecated options, which can lead to confusion.
The recommended modern way to describe disks is to use a combination of
The most explicit way to describe disks is to use a combination of
``-device`` to specify the hardware device and ``-blockdev`` to
describe the backend. The device defines what the guest sees and the
backend describes how QEMU handles the data.
backend describes how QEMU handles the data. It is the only guaranteed
stable interface for describing block devices and as such is
recommended for management tools and scripting.
The ``-drive`` option combines the device and backend into a single
command line option, which is more human friendly. There is, however,
no interface stability guarantee, and some older board models still
need updating to work with the modern blockdev forms.
Older options like ``-hda`` are essentially macros which expand into
``-drive`` options for various drive interfaces. The original forms
bake in a lot of assumptions from the days when QEMU was emulating a
legacy PC, so they are not recommended for modern configurations.
ERST
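
As an illustration of the explicit form described above (the image path and node names are arbitrary), a qcow2 image attached via ``-blockdev`` plus ``-device``:

qemu-system-x86_64 \
  -blockdev driver=file,filename=disk.qcow2,node-name=file0 \
  -blockdev driver=qcow2,file=file0,node-name=disk0 \
  -device virtio-blk-pci,drive=disk0

The file node describes the raw storage, the qcow2 node layers the format driver on top of it, and the device exposes the resulting node to the guest.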
@ -1636,6 +1648,14 @@ SRST
the raw disk image you use is not written back. You can however
force the write back by pressing C-a s (see the :ref:`disk images`
chapter in the System Emulation Users Guide).
.. warning::
snapshot is incompatible with ``-blockdev`` (instead use qemu-img
to manually create snapshot images to attach to your blockdev).
If you have mixed ``-blockdev`` and ``-drive`` declarations, you
can use the 'snapshot' property on your drive declarations
instead of this global option.
ERST
DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev,

View file

@ -1925,10 +1925,10 @@ static void guest_suspend(SuspendMode mode, Error **errp)
if (systemd_supports_mode(mode, &local_err)) {
mode_supported = true;
systemd_suspend(mode, &local_err);
}
if (!local_err) {
return;
if (!local_err) {
return;
}
}
error_free(local_err);
@ -1937,10 +1937,10 @@ static void guest_suspend(SuspendMode mode, Error **errp)
if (pmutils_supports_mode(mode, &local_err)) {
mode_supported = true;
pmutils_suspend(mode, &local_err);
}
if (!local_err) {
return;
if (!local_err) {
return;
}
}
error_free(local_err);

View file

@ -32,9 +32,8 @@
#define GUEST_FILE_READ_COUNT_MAX (48 * MiB)
/* Note: in some situations, like with the fsfreeze, logging may be
* temporarilly disabled. if it is necessary that a command be able
* to log for accounting purposes, check ga_logging_enabled() beforehand,
* and use the QERR_QGA_LOGGING_DISABLED to generate an error
* temporarily disabled. if it is necessary that a command be able
* to log for accounting purposes, check ga_logging_enabled() beforehand.
*/
void slog(const gchar *fmt, ...)
{

View file

@ -31,6 +31,7 @@
/>
<Media Id="1" Cabinet="qemu_ga.$(var.QEMU_GA_VERSION).cab" EmbedCab="yes" />
<Property Id="WHSLogo">1</Property>
<Property Id="ARPNOMODIFY" Value="yes" Secure="yes" />
<MajorUpgrade
DowngradeErrorMessage="Error: A newer version of QEMU guest agent is already installed."
/>

View file

@ -518,7 +518,7 @@ namespace _com_util
/* Stop QGA VSS provider service using Winsvc API */
STDAPI StopService(void)
{
HRESULT hr;
HRESULT hr = S_OK;
SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
SC_HANDLE service = NULL;

@ -1 +1 @@
Subproject commit 458626c4c6441045c0612f24313c7cf1f95e71c6
Subproject commit 673d2595d4f773cc266cbf8dbaf2f475a6adb949

Some files were not shown because too many files have changed in this diff.