diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index d21b4a1fd4..10886bb414 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -109,8 +109,8 @@ crash-test-debian: IMAGE: debian-amd64 script: - cd build - - make check-venv - - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386 + - make NINJA=":" check-venv + - tests/venv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386 build-system-fedora: extends: .native_build_job_template @@ -155,7 +155,7 @@ crash-test-fedora: IMAGE: fedora script: - cd build - - make check-venv + - make NINJA=":" check-venv - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32 diff --git a/QEMU_VERSION b/QEMU_VERSION index 0ee843cc60..2bbaead448 100644 --- a/QEMU_VERSION +++ b/QEMU_VERSION @@ -1 +1 @@ -7.2.0 +7.2.4 diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index c28e1ea41b..9ea4904fac 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1826,7 +1826,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, } else /* if (prot & PAGE_READ) */ { tlb_addr = tlbe->addr_read; if (!tlb_hit(tlb_addr, addr)) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { + if (!VICTIM_TLB_HIT(addr_read, addr)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); index = tlb_index(env, mmu_idx, addr); diff --git a/block/curl.c b/block/curl.c index cba4c4cac7..0b125095e3 100644 --- a/block/curl.c +++ b/block/curl.c @@ -37,8 +37,15 @@ // #define DEBUG_VERBOSE +/* CURL 7.85.0 switches to a string based API for specifying + * the desired protocols. + */ +#if LIBCURL_VERSION_NUM >= 0x075500 +#define PROTOCOLS "HTTP,HTTPS,FTP,FTPS" +#else #define PROTOCOLS (CURLPROTO_HTTP | CURLPROTO_HTTPS | \ CURLPROTO_FTP | CURLPROTO_FTPS) +#endif #define CURL_NUM_STATES 8 #define CURL_NUM_ACB 8 @@ -509,9 +516,18 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state) * obscure protocols. For example, do not allow POP3/SMTP/IMAP see * CVE-2013-0249. * - * Restricting protocols is only supported from 7.19.4 upwards. + * Restricting protocols is only supported from 7.19.4 upwards. Note: + * version 7.85.0 deprecates CURLOPT_*PROTOCOLS in favour of a string + * based CURLOPT_*PROTOCOLS_STR API. */ -#if LIBCURL_VERSION_NUM >= 0x071304 +#if LIBCURL_VERSION_NUM >= 0x075500 + if (curl_easy_setopt(state->curl, + CURLOPT_PROTOCOLS_STR, PROTOCOLS) || + curl_easy_setopt(state->curl, + CURLOPT_REDIR_PROTOCOLS_STR, PROTOCOLS)) { + goto err; + } +#elif LIBCURL_VERSION_NUM >= 0x071304 if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) || curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) { goto err; @@ -669,7 +685,12 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, const char *file; const char *cookie; const char *cookie_secret; - double d; + /* CURL >= 7.55.0 uses curl_off_t for content length instead of a double */ +#if LIBCURL_VERSION_NUM >= 0x073700 + curl_off_t cl; +#else + double cl; +#endif const char *secretid; const char *protocol_delimiter; int ret; @@ -796,27 +817,36 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, } if (curl_easy_perform(state->curl)) goto out; - if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) { + /* CURL 7.55.0 deprecates CURLINFO_CONTENT_LENGTH_DOWNLOAD in favour of + * the *_T version which returns a more sensible type for content length. 
+ */ +#if LIBCURL_VERSION_NUM >= 0x073700 + if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &cl)) { goto out; } +#else + if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &cl)) { + goto out; + } +#endif /* Prior CURL 7.19.4 return value of 0 could mean that the file size is not * know or the size is zero. From 7.19.4 CURL returns -1 if size is not * known and zero if it is really zero-length file. */ #if LIBCURL_VERSION_NUM >= 0x071304 - if (d < 0) { + if (cl < 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server didn't report file size."); goto out; } #else - if (d <= 0) { + if (cl <= 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Unknown file size or zero-length file."); goto out; } #endif - s->len = d; + s->len = cl; if ((!strncasecmp(s->url, "http://", strlen("http://")) || !strncasecmp(s->url, "https://", strlen("https://"))) diff --git a/block/io.c b/block/io.c index b9424024f9..bbaa0d1b2d 100644 --- a/block/io.c +++ b/block/io.c @@ -2087,6 +2087,9 @@ static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { flags |= BDRV_REQ_MAY_UNMAP; } + + /* Can't use optimization hint with bufferless zero write */ + flags &= ~BDRV_REQ_REGISTERED_BUF; } if (ret < 0) { diff --git a/block/iscsi.c b/block/iscsi.c index a316d46d96..1bba42a71b 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -268,6 +268,7 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, timer_mod(&iTask->retry_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time); iTask->do_retry = 1; + return; } else if (status == SCSI_STATUS_CHECK_CONDITION) { int error = iscsi_translate_sense(&task->sense); if (error == EAGAIN) { diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index b6135e9bfe..cf21b5e40a 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -213,15 +213,17 @@ void hmp_commit(Monitor *mon, const QDict *qdict) error_report("Device '%s' not found", device); return; } - if (!blk_is_available(blk)) { - error_report("Device '%s' has no medium", device); - return; - } bs = bdrv_skip_implicit_filters(blk_bs(blk)); aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); + if (!blk_is_available(blk)) { + error_report("Device '%s' has no medium", device); + aio_context_release(aio_context); + return; + } + ret = bdrv_commit(bs); aio_context_release(aio_context); diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c index bcad567c0c..3dff99ba06 100644 --- a/block/qcow2-bitmap.c +++ b/block/qcow2-bitmap.c @@ -115,7 +115,7 @@ static int update_header_sync(BlockDriverState *bs) return bdrv_flush(bs->file->bs); } -static inline void bitmap_table_to_be(uint64_t *bitmap_table, size_t size) +static inline void bitmap_table_bswap_be(uint64_t *bitmap_table, size_t size) { size_t i; @@ -1401,9 +1401,10 @@ static int store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp) goto fail; } - bitmap_table_to_be(tb, tb_size); + bitmap_table_bswap_be(tb, tb_size); ret = bdrv_pwrite(bs->file, tb_offset, tb_size * sizeof(tb[0]), tb, 0); if (ret < 0) { + bitmap_table_bswap_be(tb, tb_size); error_setg_errno(errp, -ret, "Failed to write bitmap '%s' to file", bm_name); goto fail; diff --git a/block/vhdx-log.c b/block/vhdx-log.c index 572582b87b..0866897a85 100644 --- a/block/vhdx-log.c +++ b/block/vhdx-log.c @@ -980,7 +980,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, sector_write = merged_sector; } else if (i == sectors - 
1 && trailing_length) { /* partial sector at the end of the buffer */ - ret = bdrv_pread(bs->file, file_offset, + ret = bdrv_pread(bs->file, file_offset + trailing_length, VHDX_LOG_SECTOR_SIZE - trailing_length, merged_sector + trailing_length, 0); if (ret < 0) { diff --git a/blockdev.c b/blockdev.c index 08c62c699d..0f0529d330 100644 --- a/blockdev.c +++ b/blockdev.c @@ -152,12 +152,22 @@ void blockdev_mark_auto_del(BlockBackend *blk) JOB_LOCK_GUARD(); - for (job = block_job_next_locked(NULL); job; - job = block_job_next_locked(job)) { - if (block_job_has_bdrv(job, blk_bs(blk))) { + do { + job = block_job_next_locked(NULL); + while (job && (job->job.cancelled || + job->job.deferred_to_main_loop || + !block_job_has_bdrv(job, blk_bs(blk)))) + { + job = block_job_next_locked(job); + } + if (job) { + /* + * This drops the job lock temporarily and polls, so we need to + * restart processing the list from the start after this. + */ job_cancel_locked(&job->job, false); } - } + } while (job); dinfo->auto_del = 1; } diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 879564aa8a..b00efb1482 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1065,6 +1065,7 @@ static void char_socket_finalize(Object *obj) qio_net_listener_set_client_func_full(s->listener, NULL, NULL, NULL, chr->gcontext); object_unref(OBJECT(s->listener)); + s->listener = NULL; } if (s->tls_creds) { object_unref(OBJECT(s->tls_creds)); diff --git a/configure b/configure index fc71701bbb..706200c198 100755 --- a/configure +++ b/configure @@ -2430,7 +2430,7 @@ echo "QEMU_OBJCFLAGS=$QEMU_OBJCFLAGS" >> $config_host_mak echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak echo "GLIB_LIBS=$glib_libs" >> $config_host_mak echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak -echo "GLIB_VERSION=$(pkg-config --modversion glib-2.0)" >> $config_host_mak +echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 93affe3669..0b26c01da0 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -233,8 +233,8 @@ Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead. System emulator machines ------------------------ -Arm ``virt`` machine ``dtb-kaslr-seed`` property -'''''''''''''''''''''''''''''''''''''''''''''''' +Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The ``dtb-kaslr-seed`` property on the ``virt`` board has been deprecated; use the new name ``dtb-randomness`` instead. The new name diff --git a/docs/system/multi-process.rst b/docs/system/multi-process.rst index 210531ee17..1b8852c27c 100644 --- a/docs/system/multi-process.rst +++ b/docs/system/multi-process.rst @@ -2,7 +2,7 @@ Multi-process QEMU ================== This document describes how to configure and use multi-process qemu. -For the design document refer to docs/devel/qemu-multiprocess. +For the design document refer to docs/devel/multi-process.rst. 
1) Configuration ---------------- diff --git a/fpu/softfloat.c b/fpu/softfloat.c index c7454c3eb1..108f9cb224 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -5135,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status) float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); - rp = *parts_muladd(&tp, &xp, &rp, 0, status); + rp = *parts_muladd(&tp, &xnp, &rp, 0, status); xnp = *parts_mul(&xnp, &xp, status); } diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c index 5cafcd7703..d9511f429c 100644 --- a/fsdev/virtfs-proxy-helper.c +++ b/fsdev/virtfs-proxy-helper.c @@ -26,6 +26,7 @@ #include "qemu/xattr.h" #include "9p-iov-marshal.h" #include "hw/9pfs/9p-proxy.h" +#include "hw/9pfs/9p-util.h" #include "fsdev/9p-iov-marshal.h" #define PROGNAME "virtfs-proxy-helper" @@ -338,6 +339,28 @@ static void resetugid(int suid, int sgid) } } +/* + * Open regular file or directory. Attempts to open any special file are + * rejected. + * + * returns file descriptor or -1 on error + */ +static int open_regular(const char *pathname, int flags, mode_t mode) +{ + int fd; + + fd = open(pathname, flags, mode); + if (fd < 0) { + return fd; + } + + if (close_if_special_file(fd) < 0) { + return -1; + } + + return fd; +} + /* * send response in two parts * 1) ProxyHeader @@ -682,7 +705,7 @@ static int do_create(struct iovec *iovec) if (ret < 0) { goto unmarshal_err_out; } - ret = open(path.data, flags, mode); + ret = open_regular(path.data, flags, mode); if (ret < 0) { ret = -errno; } @@ -707,7 +730,7 @@ static int do_open(struct iovec *iovec) if (ret < 0) { goto err_out; } - ret = open(path.data, flags); + ret = open_regular(path.data, flags, 0); if (ret < 0) { ret = -errno; } diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index c3526144c9..6b44e5f7a4 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -13,6 +13,8 @@ #ifndef QEMU_9P_UTIL_H #define QEMU_9P_UTIL_H +#include "qemu/error-report.h" + #ifdef O_PATH #define O_PATH_9P_UTIL O_PATH #else @@ -112,6 +114,38 @@ static inline void close_preserve_errno(int fd) errno = serrno; } +/** + * close_if_special_file() - Close @fd if neither regular file nor directory. + * + * @fd: file descriptor of open file + * Return: 0 on regular file or directory, -1 otherwise + * + * CVE-2023-2861: Prohibit opening any special file directly on host + * (especially device files), as a compromised client could potentially gain + * access outside exported tree under certain, unsafe setups. We expect + * client to handle I/O on special files exclusively on guest side. + */ +static inline int close_if_special_file(int fd) +{ + struct stat stbuf; + + if (fstat(fd, &stbuf) < 0) { + close_preserve_errno(fd); + return -1; + } + if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) { + error_report_once( + "9p: broken or compromised client detected; attempt to open " + "special file (i.e. neither regular file, nor directory)" + ); + close(fd); + errno = ENXIO; + return -1; + } + + return 0; +} + static inline int openat_dir(int dirfd, const char *name) { return openat(dirfd, name, @@ -146,6 +180,10 @@ again: return -1; } + if (close_if_special_file(fd) < 0) { + return -1; + } + serrno = errno; /* O_NONBLOCK was only needed to open the file. Let's drop it. 
We don't * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat() diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events index 6c77966c0b..a12e55c165 100644 --- a/hw/9pfs/trace-events +++ b/hw/9pfs/trace-events @@ -48,3 +48,9 @@ v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }" v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u" + +# xen-9p-backend.c +xen_9pfs_alloc(char *name) "name %s" +xen_9pfs_connect(char *name) "name %s" +xen_9pfs_disconnect(char *name) "name %s" +xen_9pfs_free(char *name) "name %s" diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 65c4979c3c..ab1df8dd2f 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -24,6 +24,8 @@ #include "qemu/option.h" #include "fsdev/qemu-fsdev.h" +#include "trace.h" + #define VERSIONS "1" #define MAX_RINGS 8 #define MAX_RING_ORDER 9 @@ -335,6 +337,8 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); int i; + trace_xen_9pfs_disconnect(xendev->name); + for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].evtchndev != NULL) { qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), @@ -343,39 +347,40 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].local_port); xen_9pdev->rings[i].evtchndev = NULL; } - } -} - -static int xen_9pfs_free(struct XenLegacyDevice *xendev) -{ - Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); - int i; - - if (xen_9pdev->rings[0].evtchndev != NULL) { - xen_9pfs_disconnect(xendev); - } - - for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].data != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].data, (1 << xen_9pdev->rings[i].ring_order)); + xen_9pdev->rings[i].data = NULL; } if (xen_9pdev->rings[i].intf != NULL) { xen_be_unmap_grant_refs(&xen_9pdev->xendev, xen_9pdev->rings[i].intf, 1); + xen_9pdev->rings[i].intf = NULL; } if (xen_9pdev->rings[i].bh != NULL) { qemu_bh_delete(xen_9pdev->rings[i].bh); + xen_9pdev->rings[i].bh = NULL; } } g_free(xen_9pdev->id); + xen_9pdev->id = NULL; g_free(xen_9pdev->tag); + xen_9pdev->tag = NULL; g_free(xen_9pdev->path); + xen_9pdev->path = NULL; g_free(xen_9pdev->security_model); + xen_9pdev->security_model = NULL; g_free(xen_9pdev->rings); + xen_9pdev->rings = NULL; +} + +static int xen_9pfs_free(struct XenLegacyDevice *xendev) +{ + trace_xen_9pfs_free(xendev->name); + return 0; } @@ -387,6 +392,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; + trace_xen_9pfs_connect(xendev->name); + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { @@ -494,6 +501,8 @@ out: static void xen_9pfs_alloc(struct XenLegacyDevice *xendev) { + trace_xen_9pfs_alloc(xendev->name); + xenstore_write_be_str(xendev, "versions", VERSIONS); xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); diff --git a/hw/acpi/cpu_hotplug.c 
b/hw/acpi/cpu_hotplug.c index 53654f8638..ff14c3f410 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -52,6 +52,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, + .max_access_size = 4, + }, + .impl = { .max_access_size = 1, }, }; diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 84d75e6b84..a2a3738b46 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -429,6 +429,16 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, * acpi_pcihp_eject_slot() when the operation is completed. */ pdev->qdev.pending_deleted_event = true; + /* if unplug was requested before OSPM is initialized, + * linux kernel will clear GPE0.sts[] bits during boot, which effectively + * hides unplug event. And than followup qmp_device_del() calls remain + * blocked by above flag permanently. + * Unblock qmp_device_del() by setting expire limit, so user can + * repeat unplug request later when OSPM has been booted. + */ + pdev->qdev.pending_deleted_expires_ms = + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* 1 msec */ + s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); } diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index 55f114ef72..97fb1916ec 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -188,33 +188,35 @@ struct AspeedMachineState { static void aspeed_write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t poll_mailbox_ready[] = { + AddressSpace *as = arm_boot_address_space(cpu, info); + static const ARMInsnFixup poll_mailbox_ready[] = { /* * r2 = per-cpu go sign value * r1 = AST_SMP_MBOX_FIELD_ENTRY * r0 = AST_SMP_MBOX_FIELD_GOSIGN */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */ - 0xe21000ff, /* ands r0, r0, #255 */ - 0xe59f201c, /* ldr r2, [pc, #28] */ - 0xe1822000, /* orr r2, r2, r0 */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5 */ + { 0xe21000ff }, /* ands r0, r0, #255 */ + { 0xe59f201c }, /* ldr r2, [pc, #28] */ + { 0xe1822000 }, /* orr r2, r2, r0 */ - 0xe59f1018, /* ldr r1, [pc, #24] */ - 0xe59f0018, /* ldr r0, [pc, #24] */ + { 0xe59f1018 }, /* ldr r1, [pc, #24] */ + { 0xe59f0018 }, /* ldr r0, [pc, #24] */ - 0xe320f002, /* wfe */ - 0xe5904000, /* ldr r4, [r0] */ - 0xe1520004, /* cmp r2, r4 */ - 0x1afffffb, /* bne */ - 0xe591f000, /* ldr pc, [r1] */ - AST_SMP_MBOX_GOSIGN, - AST_SMP_MBOX_FIELD_ENTRY, - AST_SMP_MBOX_FIELD_GOSIGN, + { 0xe320f002 }, /* wfe */ + { 0xe5904000 }, /* ldr r4, [r0] */ + { 0xe1520004 }, /* cmp r2, r4 */ + { 0x1afffffb }, /* bne */ + { 0xe591f000 }, /* ldr pc, [r1] */ + { AST_SMP_MBOX_GOSIGN }, + { AST_SMP_MBOX_FIELD_ENTRY }, + { AST_SMP_MBOX_FIELD_GOSIGN }, + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; - rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready, - sizeof(poll_mailbox_ready), - info->smp_loader_start); + arm_write_bootloader("aspeed.smpboot", as, info->smp_loader_start, + poll_mailbox_ready, fixupcontext); } static void aspeed_reset_secondary(ARMCPU *cpu, diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 15c2bf1867..8ff315f431 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -59,26 +59,6 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, return cpu_get_address_space(cs, asidx); } -typedef enum { - FIXUP_NONE = 0, /* do nothing */ - FIXUP_TERMINATOR, /* end of insns */ - FIXUP_BOARDID, /* overwrite with board ID number */ - FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ - FIXUP_ARGPTR_LO, /* 
overwrite with pointer to kernel args */ - FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ - FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ - FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ - FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ - FIXUP_BOOTREG, /* overwrite with boot register address */ - FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ - FIXUP_MAX, -} FixupType; - -typedef struct ARMInsnFixup { - uint32_t insn; - FixupType fixup; -} ARMInsnFixup; - static const ARMInsnFixup bootloader_aarch64[] = { { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */ { 0xaa1f03e1 }, /* mov x1, xzr */ @@ -149,9 +129,10 @@ static const ARMInsnFixup smpboot[] = { { 0, FIXUP_TERMINATOR } }; -static void write_bootloader(const char *name, hwaddr addr, - const ARMInsnFixup *insns, uint32_t *fixupcontext, - AddressSpace *as) +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext) { /* Fix up the specified bootloader fragment and write it into * guest memory using rom_add_blob_fixed(). fixupcontext is @@ -213,8 +194,8 @@ static void default_write_secondary(ARMCPU *cpu, fixupcontext[FIXUP_DSB] = CP15_DSB_INSN; } - write_bootloader("smpboot", info->smp_loader_start, - smpboot, fixupcontext, as); + arm_write_bootloader("smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); } void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, @@ -686,7 +667,10 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, rom_ptr_for_as(as, addr, size)); - g_free(fdt); + if (fdt != ms->fdt) { + g_free(ms->fdt); + ms->fdt = fdt; + } return size; @@ -1171,8 +1155,8 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, fixupcontext[FIXUP_ENTRYPOINT_LO] = entry; fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32; - write_bootloader("bootloader", info->loader_start, - primary_loader, fixupcontext, as); + arm_write_bootloader("bootloader", as, info->loader_start, + primary_loader, fixupcontext); if (info->write_board_setup) { info->write_board_setup(cpu, info); diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index 92d068d1f9..a7d287b1a8 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -16,6 +16,7 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" +#include "hw/arm/boot.h" #include "hw/arm/bcm2836.h" #include "hw/registerfields.h" #include "qemu/error-report.h" @@ -124,20 +125,22 @@ static const char *board_type(uint32_t board_rev) static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) { - static const uint32_t smpboot[] = { - 0xe1a0e00f, /* mov lr, pc */ - 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */ - 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ - 0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */ - 0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */ - 0xe320f001, /* 1: yield */ - 0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/ - 0xe3530000, /* cmp r3, #0 ;spin while zero */ - 0x0afffffb, /* beq 1b */ - 0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */ - 0xe12fff13, /* bx r3 ;jump to target */ - 0x400000cc, /* (constant: mailbox 3 read/clear base) */ + static const ARMInsnFixup smpboot[] = { + { 0xe1a0e00f }, /* mov lr, pc */ + { 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4) }, /* mov pc, BOARDSETUP_ADDR */ + { 0xee100fb0 }, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ + { 
0xe7e10050 }, /* ubfx r0, r0, #0, #2 ;extract LSB */ + { 0xe59f5014 }, /* ldr r5, =0x400000CC ;load mbox base */ + { 0xe320f001 }, /* 1: yield */ + { 0xe7953200 }, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core */ + { 0xe3530000 }, /* cmp r3, #0 ;spin while zero */ + { 0x0afffffb }, /* beq 1b */ + { 0xe7853200 }, /* str r3, [r5, r0, lsl #4] ;clear mbox */ + { 0xe12fff13 }, /* bx r3 ;jump to target */ + { 0x400000cc }, /* (constant: mailbox 3 read/clear base) */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; /* check that we don't overrun board setup vectors */ QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR); @@ -145,9 +148,8 @@ static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0 || (BOARDSETUP_ADDR >> 4) >= 0x100); - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, - arm_boot_address_space(cpu, info)); + arm_write_bootloader("raspi_smpboot", arm_boot_address_space(cpu, info), + info->smp_loader_start, smpboot, fixupcontext); } static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) @@ -161,26 +163,28 @@ static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) * the primary CPU goes into the kernel. We put these variables inside * a rom blob, so that the reset for ROM contents zeroes them for us. */ - static const uint32_t smpboot[] = { - 0xd2801b05, /* mov x5, 0xd8 */ - 0xd53800a6, /* mrs x6, mpidr_el1 */ - 0x924004c6, /* and x6, x6, #0x3 */ - 0xd503205f, /* spin: wfe */ - 0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */ - 0xb4ffffc4, /* cbz x4, spin */ - 0xd2800000, /* mov x0, #0x0 */ - 0xd2800001, /* mov x1, #0x0 */ - 0xd2800002, /* mov x2, #0x0 */ - 0xd2800003, /* mov x3, #0x0 */ - 0xd61f0080, /* br x4 */ + static const ARMInsnFixup smpboot[] = { + { 0xd2801b05 }, /* mov x5, 0xd8 */ + { 0xd53800a6 }, /* mrs x6, mpidr_el1 */ + { 0x924004c6 }, /* and x6, x6, #0x3 */ + { 0xd503205f }, /* spin: wfe */ + { 0xf86678a4 }, /* ldr x4, [x5,x6,lsl #3] */ + { 0xb4ffffc4 }, /* cbz x4, spin */ + { 0xd2800000 }, /* mov x0, #0x0 */ + { 0xd2800001 }, /* mov x1, #0x0 */ + { 0xd2800002 }, /* mov x2, #0x0 */ + { 0xd2800003 }, /* mov x3, #0x0 */ + { 0xd61f0080 }, /* br x4 */ + { 0, FIXUP_TERMINATOR } }; + static const uint32_t fixupcontext[FIXUP_MAX] = { 0 }; static const uint64_t spintables[] = { 0, 0, 0, 0 }; - rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), - info->smp_loader_start, as); + arm_write_bootloader("raspi_smpboot", as, info->smp_loader_start, + smpboot, fixupcontext); rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables), SPINTABLE_ADDR, as); } diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 335cfc417d..5905a33015 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -213,7 +213,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { diff --git a/hw/core/machine.c b/hw/core/machine.c index 8d34caa31d..19f42450f5 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -42,6 +42,9 @@ GlobalProperty hw_compat_7_1[] = { { "virtio-device", "queue_reset", "false" }, + { "virtio-rng-pci", "vectors", "0" }, + { "virtio-rng-pci-transitional", "vectors", "0" }, + { "virtio-rng-pci-non-transitional", 
"vectors", "0" }, }; const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1); @@ -1326,6 +1329,14 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * } } else if (machine_class->default_ram_id && machine->ram_size && numa_uses_legacy_mem()) { + if (object_property_find(object_get_objects_root(), + machine_class->default_ram_id)) { + error_setg(errp, "object name '%s' is reserved for the default" + " RAM backend, it can't be used for any other purposes." + " Change the object's 'id' to something else", + machine_class->default_ram_id); + return; + } if (!create_default_memdev(current_machine, mem_path, errp)) { return; } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 5e15c79b94..4e2e0dd53a 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -498,6 +498,8 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, struct virtio_gpu_resource_flush rf; struct virtio_gpu_scanout *scanout; pixman_region16_t flush_region; + bool within_bounds = false; + bool update_submitted = false; int i; VIRTIO_GPU_FILL_CMD(rf); @@ -518,13 +520,28 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, rf.r.x < scanout->x + scanout->width && rf.r.x + rf.r.width >= scanout->x && rf.r.y < scanout->y + scanout->height && - rf.r.y + rf.r.height >= scanout->y && - console_has_gl(scanout->con)) { - dpy_gl_update(scanout->con, 0, 0, scanout->width, - scanout->height); + rf.r.y + rf.r.height >= scanout->y) { + within_bounds = true; + + if (console_has_gl(scanout->con)) { + dpy_gl_update(scanout->con, 0, 0, scanout->width, + scanout->height); + update_submitted = true; + } } } - return; + + if (update_submitted) { + return; + } + if (!within_bounds) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts" + " bounds for flush %d: %d %d %d %d\n", + __func__, rf.resource_id, rf.r.x, rf.r.y, + rf.r.width, rf.r.height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } } if (!res->blob && diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index cbb8f0f169..99e50f65e2 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -168,6 +168,11 @@ static inline int stream_idle(struct Stream *s) return !!(s->regs[R_DMASR] & DMASR_IDLE); } +static inline int stream_halted(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_HALTED); +} + static void stream_reset(struct Stream *s) { s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. 
*/ @@ -269,7 +274,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, uint64_t addr; bool eop; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return; } @@ -326,7 +331,7 @@ static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, unsigned int rxlen; size_t pos = 0; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { return 0; } @@ -407,7 +412,7 @@ xilinx_axidma_data_stream_can_push(StreamSink *obj, XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj); struct Stream *s = &ds->dma->streams[1]; - if (!stream_running(s) || stream_idle(s)) { + if (!stream_running(s) || stream_idle(s) || stream_halted(s)) { ds->dma->notify = notify; ds->dma->notify_opaque = notify_opaque; return false; diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index de1cc7ab71..98a78b84b4 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -123,6 +123,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms) { FWCfgState *fw_cfg; uint64_t val; + const char qemu_version[] = QEMU_VERSION; fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus); @@ -148,6 +149,10 @@ static FWCfgState *create_fw_cfg(MachineState *ms) fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_config.order[0]); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + fw_cfg_add_file(fw_cfg, "/etc/qemu-version", + g_memdup(qemu_version, sizeof(qemu_version)), + sizeof(qemu_version)); + return fw_cfg; } @@ -418,10 +423,16 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) /* Start all CPUs at the firmware entry point. * Monarch CPU will initialize firmware, secondary CPUs - * will enter a small idle look and wait for rendevouz. */ + * will enter a small idle loop and wait for rendevouz. */ for (i = 0; i < smp_cpus; i++) { - cpu_set_pc(CPU(cpu[i]), firmware_entry); + CPUState *cs = CPU(cpu[i]); + + cpu_set_pc(cs, firmware_entry); + cpu[i]->env.psw = PSW_Q; cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000; + + cs->exception_index = -1; + cs->halted = 0; } /* already initialized by machine_hppa_init()? 
*/ diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index a08ee85edf..d025ef2873 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -3179,6 +3179,7 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); /* TODO: add support for VFIO and vhost users */ if (s->snoop_control) { @@ -3186,6 +3187,20 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, "Snoop Control with vhost or VFIO is not supported"); return -ENOTSUP; } + if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires caching mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } + if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires device IOTLB mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } /* Update per-address-space notifier flags */ vtd_as->notifier_flags = new; diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 170a331e3f..b231ceda9a 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -330,7 +330,7 @@ static void microvm_memory_init(MicrovmMachineState *mms) rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true, false); + x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 546b703cb4..ec5a10534b 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -799,7 +799,7 @@ void xen_load_linux(PCMachineState *pcms) rom_set_fw(fw_cfg); x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || @@ -1119,7 +1119,7 @@ void pc_memory_init(PCMachineState *pcms, if (linux_boot) { x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); } for (i = 0; i < nb_option_roms; i++) { diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 0ad0ed1603..04f793cca1 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -405,6 +405,7 @@ static void pc_xen_hvm_init(MachineState *machine) } pc_xen_hvm_init_pci(machine); + xen_igd_reserve_slot(pcms->bus); pci_create_simple(pcms->bus, -1, "xen-platform"); } #endif @@ -449,11 +450,9 @@ DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL, static void pc_i440fx_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_7_2_machine_options(m); m->alias = NULL; m->is_default = false; - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index a496bd6e74..f522874add 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -383,10 +383,8 @@ DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL, static void pc_q35_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_7_2_machine_options(m); m->alias = NULL; - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); 
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } diff --git a/hw/i386/x86.c b/hw/i386/x86.c index cb86ea9974..018fbb8b0a 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -26,7 +26,6 @@ #include "qemu/cutils.h" #include "qemu/units.h" #include "qemu/datadir.h" -#include "qemu/guest-random.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-visit-common.h" @@ -37,7 +36,6 @@ #include "sysemu/whpx.h" #include "sysemu/numa.h" #include "sysemu/replay.h" -#include "sysemu/reset.h" #include "sysemu/sysemu.h" #include "sysemu/cpu-timers.h" #include "sysemu/xen.h" @@ -662,12 +660,12 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state) return dev; } -typedef struct SetupData { +struct setup_data { uint64_t next; uint32_t type; uint32_t len; uint8_t data[]; -} __attribute__((packed)) SetupData; +} __attribute__((packed)); /* @@ -774,35 +772,10 @@ static bool load_elfboot(const char *kernel_filename, return true; } -typedef struct SetupDataFixup { - void *pos; - hwaddr orig_val, new_val; - uint32_t addr; -} SetupDataFixup; - -static void fixup_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->new_val); -} - -static void reset_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->orig_val); -} - -static void reset_rng_seed(void *opaque) -{ - SetupData *setup_data = opaque; - qemu_guest_getrandom_nofail(setup_data->data, le32_to_cpu(setup_data->len)); -} - void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed) + bool pvh_enabled) { bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; uint16_t protocol; @@ -810,17 +783,16 @@ void x86_load_linux(X86MachineState *x86ms, int dtb_size, setup_data_offset; uint32_t initrd_max; uint8_t header[8192], *setup, *kernel; - hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0, first_setup_data = 0; + hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; FILE *f; char *vmode; MachineState *machine = MACHINE(x86ms); - SetupData *setup_data; + struct setup_data *setup_data; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; const char *dtb_filename = machine->dtb; const char *kernel_cmdline = machine->kernel_cmdline; SevKernelLoaderContext sev_load_ctx = {}; - enum { RNG_SEED_LENGTH = 32 }; /* Align to 16 bytes as a paranoia measure */ cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; @@ -1097,41 +1069,19 @@ void x86_load_linux(X86MachineState *x86ms, } setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); - kernel_size = setup_data_offset + sizeof(SetupData) + dtb_size; + kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; kernel = g_realloc(kernel, kernel_size); + stq_p(header + 0x250, prot_addr + setup_data_offset); - setup_data = (SetupData *)(kernel + setup_data_offset); - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = prot_addr + setup_data_offset; + setup_data = (struct setup_data *)(kernel + setup_data_offset); + setup_data->next = 0; setup_data->type = cpu_to_le32(SETUP_DTB); setup_data->len = cpu_to_le32(dtb_size); load_image_size(dtb_filename, setup_data->data, dtb_size); } - if (!legacy_no_rng_seed) { - setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); - kernel_size = setup_data_offset + sizeof(SetupData) + RNG_SEED_LENGTH; - kernel = g_realloc(kernel, kernel_size); - setup_data = (SetupData *)(kernel + 
setup_data_offset); - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = prot_addr + setup_data_offset; - setup_data->type = cpu_to_le32(SETUP_RNG_SEED); - setup_data->len = cpu_to_le32(RNG_SEED_LENGTH); - qemu_guest_getrandom_nofail(setup_data->data, RNG_SEED_LENGTH); - qemu_register_reset_nosnapshotload(reset_rng_seed, setup_data); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_KERNEL_DATA, reset_rng_seed, NULL, - setup_data, kernel, kernel_size, true); - } else { - fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); - } - - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - sev_load_ctx.kernel_data = (char *)kernel; - sev_load_ctx.kernel_size = kernel_size; - /* * If we're starting an encrypted VM, it will be OVMF based, which uses the * efi stub for booting and doesn't require any values to be placed in the @@ -1140,20 +1090,16 @@ void x86_load_linux(X86MachineState *x86ms, * file the user passed in. */ if (!sev_enabled()) { - SetupDataFixup *fixup = g_malloc(sizeof(*fixup)); - memcpy(setup, header, MIN(sizeof(header), setup_size)); - /* Offset 0x250 is a pointer to the first setup_data link. */ - fixup->pos = setup + 0x250; - fixup->orig_val = ldq_p(fixup->pos); - fixup->new_val = first_setup_data; - fixup->addr = cpu_to_le32(real_addr); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_SETUP_ADDR, fixup_setup_data, NULL, - fixup, &fixup->addr, sizeof(fixup->addr), true); - qemu_register_reset(reset_setup_data, fixup); - } else { - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); } + + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); sev_load_ctx.setup_data = (char *)setup; diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index 8cca124807..d0bf8d545b 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -49,12 +49,9 @@ static void aw_a10_pic_update(AwA10PICState *s) static void aw_a10_pic_set_irq(void *opaque, int irq, int level) { AwA10PICState *s = opaque; + uint32_t *pending_reg = &s->irq_pending[irq / 32]; - if (level) { - set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } else { - clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); - } + *pending_reg = deposit32(*pending_reg, irq % 32, 1, !!level); aw_a10_pic_update(s); } diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index ac21be306c..69175e972d 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -189,7 +189,7 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, bool acc_mode) { struct iovec iov[ASPEED_HACE_MAX_SG]; - g_autofree uint8_t *digest_buf; + g_autofree uint8_t *digest_buf = NULL; size_t digest_len = 0; int niov = 0; int i; diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index ecc0245fe8..c3fed5fcbe 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -350,8 +350,13 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + 
dma_memory_read(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->status2 = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); } static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, @@ -400,10 +405,15 @@ static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s, } static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, - FrameDescriptor *desc, + const FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc), + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->status2); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, phys_addr, &desc_words, sizeof(desc_words), MEMTXATTRS_UNSPECIFIED); } @@ -638,8 +648,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_TX_CUR_BUF: /* Transmit Current Buffer */ if (s->tx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->tx_desc_curr); value = desc.addr; } else { value = 0; @@ -652,8 +661,7 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_RX_CUR_BUF: /* Receive Current Buffer */ if (s->rx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc), - MEMTXATTRS_UNSPECIFIED); + allwinner_sun8i_emac_get_desc(s, &desc, s->rx_desc_curr); value = desc.addr; } else { value = 0; diff --git a/hw/net/e1000.c b/hw/net/e1000.c index e26e0a64c1..0dfdf47313 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -567,7 +567,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size) qemu_send_packet(nc, buf, size); } inc_tx_bcast_or_mcast_count(s, buf); - e1000x_increase_size_stats(s->mac_reg, PTCregs, size); + e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4); } static void @@ -631,10 +631,9 @@ xmit_seg(E1000State *s) } e1000x_inc_reg_if_not_full(s->mac_reg, TPT); - e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size); - s->mac_reg[GPTC] = s->mac_reg[TPT]; - s->mac_reg[GOTCL] = s->mac_reg[TOTL]; - s->mac_reg[GOTCH] = s->mac_reg[TOTH]; + e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); + e1000x_inc_reg_if_not_full(s->mac_reg, GPTC); + e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); } static void diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index fc9cdb4528..c71d82ce1d 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -687,9 +687,8 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); } static void diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index a8d93870b5..3fdc34f753 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -217,15 +217,14 @@ e1000x_update_rx_total_stats(uint32_t *mac, e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); e1000x_inc_reg_if_not_full(mac, TPR); - mac[GPRC] = mac[TPR]; + e1000x_inc_reg_if_not_full(mac, GPRC); /* TOR - Total Octets Received: * This register includes bytes received in a packet from the <Destination Address> field through the <CRC> 
field, inclusively. * Always include FCS length (4) in size. */ e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); - mac[GORCL] = mac[TORL]; - mac[GORCH] = mac[TORH]; + e1000x_grow_8reg_if_not_full(mac, GORCL, data_size + 4); } void diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 7ccd3e5142..db3a04deb1 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -118,14 +118,18 @@ static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) d->next = le32_to_cpu(d->next); } -static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +static void emac_store_desc(MSF2EmacState *s, const EmacDesc *d, hwaddr desc) { - /* Convert from host endianness into LE. */ - d->pktaddr = cpu_to_le32(d->pktaddr); - d->pktsize = cpu_to_le32(d->pktsize); - d->next = cpu_to_le32(d->next); + EmacDesc outd; + /* + * Convert from host endianness into LE. We use a local struct because + * calling code may still want to look at the fields afterwards. + */ + outd.pktaddr = cpu_to_le32(d->pktaddr); + outd.pktsize = cpu_to_le32(d->pktsize); + outd.next = cpu_to_le32(d->next); - address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, &outd, sizeof outd); } static void msf2_dma_tx(MSF2EmacState *s) diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 700b1b66b6..eb679d7c40 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -2154,6 +2154,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & CP_TC_LGSEN_MSS_MASK; + if (large_send_mss == 0) { + goto skip_offload; + } DPRINTF("+++ C+ mode offloaded task TSO IP data %d " "frame data %d specified MSS=%d\n", diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index aba12759d5..4abd49e298 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -802,7 +802,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, } if (!get_vhost_net(nc->peer)) { - virtio_add_feature(&features, VIRTIO_F_RING_RESET); return features; } diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index d2ab527ef4..56559cda24 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1441,7 +1441,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) vmxnet3_setup_rx_filtering(s); /* Cache fields from shared memory */ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); - assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU); + assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu <= VMXNET3_MAX_MTU); VMW_CFPRN("MTU is %u", s->mtu); s->max_rx_frags = diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index e54276dc1d..749a6938dd 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -1331,10 +1331,23 @@ static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, } } +static void nvme_update_cq_eventidx(const NvmeCQueue *cq) +{ + uint32_t v = cpu_to_le32(cq->head); + + //not in 7.2: trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head); + + pci_dma_write(PCI_DEVICE(cq->ctrl), cq->ei_addr, &v, sizeof(v)); +} + static void nvme_update_cq_head(NvmeCQueue *cq) { - pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &cq->head, - sizeof(cq->head)); + uint32_t v; + + pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &v, sizeof(v)); + + cq->head = le32_to_cpu(v); + trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head); } @@ -1351,6 +1364,7 @@ static void nvme_post_cqes(void *opaque) hwaddr addr; if (n->dbbuf_enabled) { + nvme_update_cq_eventidx(cq); nvme_update_cq_head(cq); } @@ 
-2477,6 +2491,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, req); if (status) { + g_free(iocb->range); + qemu_aio_unref(iocb); + return status; } @@ -6141,15 +6158,21 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) static void nvme_update_sq_eventidx(const NvmeSQueue *sq) { - pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &sq->tail, - sizeof(sq->tail)); + uint32_t v = cpu_to_le32(sq->tail); + + pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &v, sizeof(v)); + trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail); } static void nvme_update_sq_tail(NvmeSQueue *sq) { - pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &sq->tail, - sizeof(sq->tail)); + uint32_t v; + + pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &v, sizeof(v)); + + sq->tail = le32_to_cpu(v); + trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail); } diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 6edf5ea3e9..371a45dfe2 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -693,12 +693,12 @@ static const VMStateDescription vmstate_fw_cfg = { } }; -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only) +static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, + FWCfgCallback select_cb, + FWCfgWriteCallback write_cb, + void *callback_opaque, + void *data, size_t len, + bool read_only) { int arch = !!(key & FW_CFG_ARCH_LOCAL); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index dc86c1c7db..fbdc48911e 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -806,6 +806,8 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, int64_t signed_decr; /* Truncate value to decr_width and sign extend for simplicity */ + value = extract64(value, 0, nr_bits); + decr = extract64(decr, 0, nr_bits); signed_value = sextract64(value, 0, nr_bits); signed_decr = sextract64(decr, 0, nr_bits); diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index fcbe4c5837..ec8d9584fb 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -271,9 +271,11 @@ static void ibm_40p_init(MachineState *machine) } /* PCI -> ISA bridge */ - i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); + i82378_dev = DEVICE(pci_new(PCI_DEVFN(11, 0), "i82378")); qdev_connect_gpio_out(i82378_dev, 0, qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); + qdev_realize_and_unref(i82378_dev, BUS(pci_bus), &error_fatal); + sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index da7ddfa548..89db963c46 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -796,6 +796,12 @@ int pvrdma_exec_cmd(PVRDMADev *dev) dsr_info = &dev->dsr_info; + if (!dsr_info->dsr) { + /* Buggy or malicious guest driver */ + rdma_error_report("Exec command without dsr, req or rsp buffers"); + goto out; + } + if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / sizeof(struct cmd_handler)) { rdma_error_report("Unsupported command"); diff --git a/hw/remote/trace-events b/hw/remote/trace-events index c167b3c7a5..0d1b7d56a5 100644 --- a/hw/remote/trace-events +++ b/hw/remote/trace-events @@ -5,8 +5,8 @@ mpqemu_recv_io_error(int cmd, int size, int nfds) "failed to receive %d size %d, # vfio-user-obj.c vfu_prop(const char *prop, const char *val) "vfu: setting %s as %s" -vfu_cfg_read(uint32_t offset, 
uint32_t val) "vfu: cfg: 0x%u -> 0x%x" -vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%u <- 0x%x" +vfu_cfg_read(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x -> 0x%x" +vfu_cfg_write(uint32_t offset, uint32_t val) "vfu: cfg: 0x%x <- 0x%x" vfu_dma_register(uint64_t gpa, size_t len) "vfu: registering GPA 0x%"PRIx64", %zu bytes" vfu_dma_unregister(uint64_t gpa) "vfu: unregistering GPA 0x%"PRIx64"" vfu_bar_register(int i, uint64_t addr, uint64_t size) "vfu: BAR %d: addr 0x%"PRIx64" size 0x%"PRIx64"" diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index 7fe92d402f..edf6750f54 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -207,6 +207,12 @@ int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx) { int64_t nidx = 0; + if (ms->numa_state->num_nodes > ms->smp.cpus) { + error_report("Number of NUMA nodes (%d)" + " cannot exceed the number of available CPUs (%d).", + ms->numa_state->num_nodes, ms->smp.max_cpus); + exit(EXIT_FAILURE); + } if (ms->numa_state->num_nodes) { nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes); if (ms->numa_state->num_nodes <= nidx) { diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index 50979640c3..42532c4744 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -1134,15 +1134,24 @@ static void lsi_execute_script(LSIState *s) uint32_t addr, addr_high; int opcode; int insn_processed = 0; + static int reentrancy_level; + + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; again: - if (++insn_processed > LSI_MAX_INSN) { - /* Some windows drivers make the device spin waiting for a memory - location to change. If we have been executed a lot of code then - assume this is the case and force an unexpected device disconnect. - This is apparently sufficient to beat the drivers into submission. - */ + /* + * Some windows drivers make the device spin waiting for a memory location + * to change. If we have executed more than LSI_MAX_INSN instructions then + * assume this is the case and force an unexpected device disconnect. This + * is apparently sufficient to beat the drivers into submission. + * + * Another issue (CVE-2023-0330) can occur if the script is programmed to + * trigger itself again and again. Avoid this problem by stopping after + * being called multiple times in a reentrant way (8 is an arbitrary value + * which should be enough for all valid use cases). + */ + if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { if (!(s->sien0 & LSI_SIST0_UDC)) { qemu_log_mask(LOG_GUEST_ERROR, "lsi_scsi: inf. loop with UDC masked"); @@ -1596,6 +1605,8 @@ again: } } trace_lsi_execute_script_stop(); + + reentrancy_level--; } static uint8_t lsi_reg_readb(LSIState *s, int offset) diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 92cce20a4d..d513870181 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -190,12 +190,16 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) && (r->req.cmd.buf[1] & 0x01)) { page = r->req.cmd.buf[2]; - if (page == 0xb0) { + if (page == 0xb0 && r->buflen >= 8) { + uint8_t buf[16] = {}; + uint8_t buf_used = MIN(r->buflen, 16); uint64_t max_transfer = calculate_max_transfer(s); - stl_be_p(&r->buf[8], max_transfer); - /* Also take care of the opt xfer len. 
*/ - stl_be_p(&r->buf[12], - MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); + + memcpy(buf, r->buf, buf_used); + stl_be_p(&buf[8], max_transfer); + stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12]))); + memcpy(r->buf + 8, buf + 8, buf_used - 8); + } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { /* * Now we're capable of supplying the VPD Block Limits diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 51e5e90830..92a0f42708 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -302,6 +302,30 @@ static void allwinner_sdhost_auto_stop(AwSdHostState *s) } } +static void read_descriptor(AwSdHostState *s, hwaddr desc_addr, + TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + dma_memory_read(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); + desc->status = le32_to_cpu(desc_words[0]); + desc->size = le32_to_cpu(desc_words[1]); + desc->addr = le32_to_cpu(desc_words[2]); + desc->next = le32_to_cpu(desc_words[3]); +} + +static void write_descriptor(AwSdHostState *s, hwaddr desc_addr, + const TransferDescriptor *desc) +{ + uint32_t desc_words[4]; + desc_words[0] = cpu_to_le32(desc->status); + desc_words[1] = cpu_to_le32(desc->size); + desc_words[2] = cpu_to_le32(desc->addr); + desc_words[3] = cpu_to_le32(desc->next); + dma_memory_write(&s->dma_as, desc_addr, &desc_words, sizeof(desc_words), + MEMTXATTRS_UNSPECIFIED); +} + static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, hwaddr desc_addr, TransferDescriptor *desc, @@ -312,9 +336,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, uint32_t num_bytes = max_bytes; uint8_t buf[1024]; - /* Read descriptor */ - dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + read_descriptor(s, desc_addr, desc); if (desc->size == 0) { desc->size = klass->max_desc_size; } else if (desc->size > klass->max_desc_size) { @@ -356,8 +378,7 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, /* Clear hold flag and flush descriptor */ desc->status &= ~DESC_STATUS_HOLD; - dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc), - MEMTXATTRS_UNSPECIFIED); + write_descriptor(s, desc_addr, desc); return num_done; } diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index b4243de735..66a020999b 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -749,14 +749,16 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores; t->core_enabled = t->core_count; - t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); - t->thread_count = (ms->smp.threads > 255) ? 
0xFF : ms->smp.threads; - t->thread_count2 = cpu_to_le16(ms->smp.threads); t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ t->processor_family2 = cpu_to_le16(0x01); /* Other */ + if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { + t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); + t->thread_count2 = cpu_to_le16(ms->smp.threads); + } + SMBIOS_BUILD_TABLE_POST; smbios_type4_count++; } diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 9520471be2..5f88ffdef8 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -352,6 +352,16 @@ static const VMStateDescription vmstate_hpet = { } }; +static void hpet_arm(HPETTimer *t, uint64_t ticks) +{ + if (ticks < ns_to_ticks(INT64_MAX / 2)) { + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks)); + } else { + timer_del(t->qemu_timer); + } +} + /* * timer expiration callback */ @@ -374,13 +384,11 @@ static void hpet_timer(void *opaque) } } diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { if (t->wrap_flag) { diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); t->wrap_flag = 0; } } @@ -407,8 +415,7 @@ static void hpet_set_timer(HPETTimer *t) t->wrap_flag = 1; } } - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } static void hpet_del_timer(HPETTimer *t) diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c index 42be79c736..50c6772383 100644 --- a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -45,7 +45,12 @@ static uint32_t update_counter(NRF51TimerState *s, int64_t now) uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns); s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]); - s->update_counter_ns = now; + /* + * Only advance the sync time to the timestamp of the last tick, + * not all the way to 'now', so we don't lose time if we do + * multiple resyncs in a single tick. + */ + s->update_counter_ns += ticks_to_ns(s, ticks); return ticks; } diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 21cabcb6bd..8306824961 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1214,6 +1214,8 @@ static void ohci_frame_boundary(void *opaque) /* Increment frame number and take care of endianness. */ ohci->frame_number = (ohci->frame_number + 1) & 0xffff; hcca.frame = cpu_to_le16(ohci->frame_number); + /* When the HC updates frame number, set pad to 0. Ref OHCI Spec 4.4.1*/ + hcca.pad = 0; if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) { if (!ohci->done) diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 939dcc3d4a..92a45de4c3 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -663,6 +663,8 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) vfio_disable_interrupts(vdev); + vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); +retry: /* * Setting vector notifiers needs to enable route for each vector. 
* Deferring to commit the KVM routes once rather than per vector @@ -670,8 +672,6 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) */ vfio_prepare_kvm_msi_virq_batch(vdev); - vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); -retry: vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); for (i = 0; i < vdev->nr_vectors; i++) { @@ -3159,7 +3159,9 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) out_deregister: pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); - kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + if (vdev->irqchip_change_notifier.notify) { + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + } out_teardown: vfio_teardown_msi(vdev); vfio_bars_exit(vdev); diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 5bd14cad96..d422418f2d 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp) */ static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) { - return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx); + return svq->num_free; } /** @@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, return -EINVAL; } + svq->num_free -= ndescs; svq->desc_state[qemu_head].elem = elem; svq->desc_state[qemu_head].ndescs = ndescs; vhost_svq_kick(svq); @@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; + svq->num_free += num; *len = used_elem.len; return g_steal_pointer(&svq->desc_state[used_elem.id].elem); @@ -522,7 +524,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, size_t vhost_svq_poll(VhostShadowVirtqueue *svq) { int64_t start_us = g_get_monotonic_time(); - uint32_t len; + uint32_t len = 0; do { if (vhost_svq_more_used(svq)) { @@ -656,6 +658,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, svq->vq = vq; svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq)); + svq->num_free = svq->vring.num; driver_size = vhost_svq_driver_area_size(svq); device_size = vhost_svq_device_area_size(svq); svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size); diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index d04c34a589..328a7fc075 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue { /* Next head to consume from the device */ uint16_t last_used_idx; + + /* Size of SVQ vring free descriptors */ + uint16_t num_free; } VhostShadowVirtqueue; bool vhost_svq_valid_features(uint64_t features, Error **errp); diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c index b7b82a1099..97145376eb 100644 --- a/hw/virtio/vhost-user-gpio.c +++ b/hw/virtio/vhost-user-gpio.c @@ -16,6 +16,7 @@ #include "trace.h" #define REALIZE_CONNECTION_RETRIES 3 +#define VHOST_NVQS 2 /* Features required from VirtIO */ static const int feature_bits[] = { @@ -198,8 +199,7 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) { virtio_delete_queue(gpio->command_vq); virtio_delete_queue(gpio->interrupt_vq); - g_free(gpio->vhost_dev.vqs); - gpio->vhost_dev.vqs = NULL; + g_free(gpio->vhost_vqs); virtio_cleanup(vdev); 
vhost_user_cleanup(&gpio->vhost_user); } @@ -219,6 +219,9 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp) vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); gpio->vhost_user.supports_config = true; + gpio->vhost_dev.nvqs = VHOST_NVQS; + gpio->vhost_dev.vqs = gpio->vhost_vqs; + ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { @@ -337,10 +340,9 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp) virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); - gpio->vhost_dev.nvqs = 2; gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); - gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs); + gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS); gpio->connected = false; diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index dc5c828ba6..60eaf0d95b 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -143,8 +143,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) vhost_user_cleanup(&i2c->vhost_user); virtio_delete_queue(i2c->vq); virtio_cleanup(vdev); - g_free(i2c->vhost_dev.vqs); - i2c->vhost_dev.vqs = NULL; } static int vu_i2c_connect(DeviceState *dev) @@ -228,6 +226,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp) ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { + g_free(i2c->vhost_dev.vqs); do_vhost_user_cleanup(vdev, i2c); } @@ -239,10 +238,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserI2C *i2c = VHOST_USER_I2C(dev); + struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs; /* This will stop vhost backend if appropriate. 
*/ vu_i2c_set_status(vdev, 0); vhost_dev_cleanup(&i2c->vhost_dev); + g_free(vhost_vqs); do_vhost_user_cleanup(vdev, i2c); } diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 201a39e220..efc54cd3fb 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -229,6 +229,7 @@ static void vu_rng_device_realize(DeviceState *dev, Error **errp) return; vhost_dev_init_failed: + g_free(rng->vhost_dev.vqs); virtio_delete_queue(rng->req_vq); virtio_add_queue_failed: virtio_cleanup(vdev); @@ -239,12 +240,12 @@ static void vu_rng_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserRNG *rng = VHOST_USER_RNG(dev); + struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs; vu_rng_set_status(vdev, 0); vhost_dev_cleanup(&rng->vhost_dev); - g_free(rng->vhost_dev.vqs); - rng->vhost_dev.vqs = NULL; + g_free(vhost_vqs); virtio_delete_queue(rng->req_vq); virtio_cleanup(vdev); vhost_user_cleanup(&rng->vhost_user); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 8f635844af..d92b026e1c 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -305,19 +305,8 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) return 0; } -struct vhost_user_read_cb_data { - struct vhost_dev *dev; - VhostUserMsg *msg; - GMainLoop *loop; - int ret; -}; - -static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, - gpointer opaque) +static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) { - struct vhost_user_read_cb_data *data = opaque; - struct vhost_dev *dev = data->dev; - VhostUserMsg *msg = data->msg; struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; uint8_t *p = (uint8_t *) msg; @@ -325,8 +314,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, r = vhost_user_read_header(dev, msg); if (r < 0) { - data->ret = r; - goto end; + return r; } /* validate message size is sane */ @@ -334,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, error_report("Failed to read msg header." " Size %d exceeds the maximum %zu.", msg->hdr.size, VHOST_USER_PAYLOAD_SIZE); - data->ret = -EPROTO; - goto end; + return -EPROTO; } if (msg->hdr.size) { @@ -346,84 +333,11 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, int saved_errno = errno; error_report("Failed to read msg payload." " Read %d instead of %d.", r, msg->hdr.size); - data->ret = r < 0 ? -saved_errno : -EIO; - goto end; + return r < 0 ? -saved_errno : -EIO; } } -end: - g_main_loop_quit(data->loop); - return G_SOURCE_REMOVE; -} - -static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, - gpointer opaque); - -/* - * This updates the read handler to use a new event loop context. - * Event sources are removed from the previous context : this ensures - * that events detected in the previous context are purged. They will - * be re-detected and processed in the new context. 
- */ -static void slave_update_read_handler(struct vhost_dev *dev, - GMainContext *ctxt) -{ - struct vhost_user *u = dev->opaque; - - if (!u->slave_ioc) { - return; - } - - if (u->slave_src) { - g_source_destroy(u->slave_src); - g_source_unref(u->slave_src); - } - - u->slave_src = qio_channel_add_watch_source(u->slave_ioc, - G_IO_IN | G_IO_HUP, - slave_read, dev, NULL, - ctxt); -} - -static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) -{ - struct vhost_user *u = dev->opaque; - CharBackend *chr = u->user->chr; - GMainContext *prev_ctxt = chr->chr->gcontext; - GMainContext *ctxt = g_main_context_new(); - GMainLoop *loop = g_main_loop_new(ctxt, FALSE); - struct vhost_user_read_cb_data data = { - .dev = dev, - .loop = loop, - .msg = msg, - .ret = 0 - }; - - /* - * We want to be able to monitor the slave channel fd while waiting - * for chr I/O. This requires an event loop, but we can't nest the - * one to which chr is currently attached : its fd handlers might not - * be prepared for re-entrancy. So we create a new one and switch chr - * to use it. - */ - slave_update_read_handler(dev, ctxt); - qemu_chr_be_update_read_handlers(chr->chr, ctxt); - qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data); - - g_main_loop_run(loop); - - /* - * Restore the previous event loop context. This also destroys/recreates - * event sources : this guarantees that all pending events in the original - * context that have been processed by the nested loop are purged. - */ - qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt); - slave_update_read_handler(dev, NULL); - - g_main_loop_unref(loop); - g_main_context_unref(ctxt); - - return data.ret; + return 0; } static int process_message_reply(struct vhost_dev *dev, @@ -1802,7 +1716,9 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev) return -ECONNREFUSED; } u->slave_ioc = ioc; - slave_update_read_handler(dev, NULL); + u->slave_src = qio_channel_add_watch_source(u->slave_ioc, + G_IO_IN | G_IO_HUP, + slave_read, dev, NULL, NULL); if (reply_supported) { msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; @@ -2108,8 +2024,8 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, } else { if (virtio_has_feature(protocol_features, VHOST_USER_PROTOCOL_F_CONFIG)) { - warn_reportf_err(*errp, "vhost-user backend supports " - "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); + warn_report("vhost-user backend supports " + "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); } } diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 7468e44b87..03c78d25d8 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -707,26 +707,11 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev, return ret; } -static void vhost_vdpa_reset_svq(struct vhost_vdpa *v) -{ - if (!v->shadow_vqs_enabled) { - return; - } - - for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { - VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); - vhost_svq_stop(svq); - } -} - static int vhost_vdpa_reset_device(struct vhost_dev *dev) { - struct vhost_vdpa *v = dev->opaque; int ret; uint8_t status = 0; - vhost_vdpa_reset_svq(v); - ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); trace_vhost_vdpa_reset_device(dev, status); return ret; @@ -1088,6 +1073,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev) for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + + 
vhost_svq_stop(svq); vhost_vdpa_svq_unmap_rings(dev, svq); } } diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 7fb008bc9e..f38997b3f6 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -20,6 +20,7 @@ #include "qemu/range.h" #include "qemu/error-report.h" #include "qemu/memfd.h" +#include "qemu/log.h" #include "standard-headers/linux/vhost_types.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } } +static bool vhost_dev_has_iommu(struct vhost_dev *dev) +{ + VirtIODevice *vdev = dev->vdev; + + /* + * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support + * incremental memory mapping API via IOTLB API. For platform that + * does not have IOMMU, there's no need to enable this feature + * which may cause unnecessary IOTLB miss/update transactions. + */ + if (vdev) { + return virtio_bus_device_iommu_enabled(vdev) && + virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + } else { + return false; + } +} + static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, MemoryRegionSection *section, hwaddr first, @@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, continue; } - vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, - range_get_last(vq->used_phys, vq->used_size)); + if (vhost_dev_has_iommu(dev)) { + IOMMUTLBEntry iotlb; + hwaddr used_phys = vq->used_phys, used_size = vq->used_size; + hwaddr phys, s, offset; + + while (used_size) { + rcu_read_lock(); + iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, + used_phys, + true, + MEMTXATTRS_UNSPECIFIED); + rcu_read_unlock(); + + if (!iotlb.target_as) { + qemu_log_mask(LOG_GUEST_ERROR, "translation " + "failure for used_iova %"PRIx64"\n", + used_phys); + return -EINVAL; + } + + offset = used_phys & iotlb.addr_mask; + phys = iotlb.translated_addr + offset; + + /* + * Distance from start of used ring until last byte of + * IOMMU page. + */ + s = iotlb.addr_mask - offset; + /* + * Size of used ring, or of the part of it until end + * of IOMMU page. To avoid zero result, do the adding + * outside of MIN(). + */ + s = MIN(s, used_size - 1) + 1; + + vhost_dev_sync_region(dev, section, start_addr, end_addr, phys, + range_get_last(phys, s)); + used_size -= s; + used_phys += s; + } + } else { + vhost_dev_sync_region(dev, section, start_addr, + end_addr, vq->used_phys, + range_get_last(vq->used_phys, vq->used_size)); + } } return 0; } @@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) dev->log_size = size; } -static bool vhost_dev_has_iommu(struct vhost_dev *dev) -{ - VirtIODevice *vdev = dev->vdev; - - /* - * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support - * incremental memory mapping API via IOTLB API. For platform that - * does not have IOMMU, there's no need to enable this feature - * which may cause unnecessary IOTLB miss/update transactions. 
- */ - if (vdev) { - return virtio_bus_device_iommu_enabled(vdev) && - virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); - } else { - return false; - } -} - static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, hwaddr *plen, bool is_write) { @@ -1890,6 +1934,9 @@ fail_vq: } fail_mem: + if (vhost_dev_has_iommu(hdev)) { + memory_listener_unregister(&hdev->iommu_listener); + } fail_features: vdev->vhost_started = false; hdev->started = false; diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 97da74e719..a6dbdd32da 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -476,15 +476,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) size_t max_len; CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info; - max_len = op_info->iv_len + - op_info->aad_len + - op_info->src_len + - op_info->dst_len + - op_info->digest_result_len; + if (op_info) { + max_len = op_info->iv_len + + op_info->aad_len + + op_info->src_len + + op_info->dst_len + + op_info->digest_result_len; - /* Zeroize and free request data structure */ - memset(op_info, 0, sizeof(*op_info) + max_len); - g_free(op_info); + /* Zeroize and free request data structure */ + memset(op_info, 0, sizeof(*op_info) + max_len); + g_free(op_info); + } } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) { CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info; if (op_info) { diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index ed170def48..56db586c89 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -235,7 +235,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, uint64_t offset, size; int ret = 0; - first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = s->offset_within_region / vmem->block_size; first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit); while (first_bit < vmem->bitmap_size) { MemoryRegionSection tmp = *s; @@ -267,7 +267,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, uint64_t offset, size; int ret = 0; - first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = s->offset_within_region / vmem->block_size; first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit); while (first_bit < vmem->bitmap_size) { MemoryRegionSection tmp = *s; @@ -341,7 +341,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset, if (ret) { /* Notify all already-notified listeners. 
*/ QLIST_FOREACH(rdl2, &vmem->rdl_list, next) { - MemoryRegionSection tmp = *rdl->section; + MemoryRegionSection tmp = *rdl2->section; if (rdl2 == rdl) { break; diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index eb6347ab5d..384c8f0f08 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -1478,7 +1478,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; @@ -1487,13 +1487,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; unsigned int num_bufs; VRingDesc desc; unsigned int i; + unsigned int max = vq->vring.num; num_bufs = total_bufs; @@ -1615,7 +1614,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache *desc_cache; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; @@ -1627,14 +1626,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, wrap_counter = vq->last_avail_wrap_counter; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - for (;;) { unsigned int num_bufs = total_bufs; unsigned int i = idx; int rc; + unsigned int max = vq->vring.num; desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); if (!is_desc_avail(desc.flags, wrap_counter)) { break; diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 0ec7e52183..5dd706efbf 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -57,6 +57,7 @@ #include #include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "hw/xen/xen.h" @@ -780,15 +781,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, s->dev.devfn); - xen_host_pci_device_get(&s->real_device, - s->hostaddr.domain, s->hostaddr.bus, - s->hostaddr.slot, s->hostaddr.function, - errp); - if (*errp) { - error_append_hint(errp, "Failed to \"open\" the real pci device"); - return; - } - s->is_virtfn = s->real_device.is_virtfn; if (s->is_virtfn) { XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", @@ -803,8 +795,10 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->io_listener = xen_pt_io_listener; /* Setup VGA bios for passthrough GFX */ - if ((s->real_device.domain == 0) && (s->real_device.bus == 0) && - (s->real_device.dev == 2) && (s->real_device.func == 0)) { + if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) && + (s->real_device.bus == XEN_PCI_IGD_BUS) && + (s->real_device.dev == XEN_PCI_IGD_DEV) && + (s->real_device.func == XEN_PCI_IGD_FN)) { if (!is_igd_vga_passthrough(&s->real_device)) { error_setg(errp, "Need to enable igd-passthru if you're trying" " to passthrough IGD GFX"); @@ -950,11 +944,58 @@ static void xen_pci_passthrough_instance_init(Object *obj) PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ + if (!xen_igd_gfx_pt_enabled()) { + return; + } + + XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n"); + pci_bus->slot_reserved_mask |= 
XEN_PCI_IGD_SLOT_MASK; +} + +static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) +{ + ERRP_GUARD(); + PCIDevice *pci_dev = (PCIDevice *)qdev; + XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s); + PCIBus *pci_bus = pci_get_bus(pci_dev); + + xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, s->hostaddr.function, + errp); + if (*errp) { + error_append_hint(errp, "Failed to \"open\" the real pci device"); + return; + } + + if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) { + xpdc->pci_qdev_realize(qdev, errp); + return; + } + + if (is_igd_vga_passthrough(&s->real_device) && + s->real_device.domain == XEN_PCI_IGD_DOMAIN && + s->real_device.bus == XEN_PCI_IGD_BUS && + s->real_device.dev == XEN_PCI_IGD_DEV && + s->real_device.func == XEN_PCI_IGD_FN && + s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) { + pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK; + XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n"); + } + xpdc->pci_qdev_realize(qdev, errp); +} + static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass); + xpdc->pci_qdev_realize = dc->realize; + dc->realize = xen_igd_clear_slot; k->realize = xen_pt_realize; k->exit = xen_pt_unregister_device; k->config_read = xen_pt_pci_read_config; @@ -977,6 +1018,7 @@ static const TypeInfo xen_pci_passthrough_info = { .instance_size = sizeof(XenPCIPassthroughState), .instance_finalize = xen_pci_passthrough_finalize, .class_init = xen_pci_passthrough_class_init, + .class_size = sizeof(XenPTDeviceClass), .instance_init = xen_pci_passthrough_instance_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h index e7c4316a7d..292bdf7499 100644 --- a/hw/xen/xen_pt.h +++ b/hw/xen/xen_pt.h @@ -41,7 +41,20 @@ typedef struct XenPTReg XenPTReg; #define TYPE_XEN_PT_DEVICE "xen-pci-passthrough" OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE) +#define XEN_PT_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE) +#define XEN_PT_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE) + +typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp); + +typedef struct XenPTDeviceClass { + PCIDeviceClass parent_class; + XenPTQdevRealize pci_qdev_realize; +} XenPTDeviceClass; + uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void xen_igd_reserve_slot(PCIBus *pci_bus); void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, XenHostPCIDevice *dev); @@ -76,6 +89,13 @@ typedef int (*xen_pt_conf_byte_read) #define XEN_PCI_INTEL_OPREGION 0xfc +#define XEN_PCI_IGD_DOMAIN 0 +#define XEN_PCI_IGD_BUS 0 +#define XEN_PCI_IGD_DEV 2 +#define XEN_PCI_IGD_FN 0 +#define XEN_PCI_IGD_SLOT_MASK \ + (1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN))) + typedef enum { XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ XEN_PT_GRP_TYPE_EMU, /* emul reg group */ diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c index 2d8cac8d54..5c108446a8 100644 --- a/hw/xen/xen_pt_stub.c +++ b/hw/xen/xen_pt_stub.c @@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp) error_setg(errp, "Xen PCI passthrough support not built in"); } } + +void 
xen_igd_reserve_slot(PCIBus *pci_bus) +{ +} diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index f18cc3064f..80c492d742 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -183,4 +183,53 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, const struct arm_boot_info *info, hwaddr mvbar_addr); +typedef enum { + FIXUP_NONE = 0, /* do nothing */ + FIXUP_TERMINATOR, /* end of insns */ + FIXUP_BOARDID, /* overwrite with board ID number */ + FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ + FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ + FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ + FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ + FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ + FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ + FIXUP_BOOTREG, /* overwrite with boot register address */ + FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ + FIXUP_MAX, +} FixupType; + +typedef struct ARMInsnFixup { + uint32_t insn; + FixupType fixup; +} ARMInsnFixup; + +/** + * arm_write_bootloader - write a bootloader to guest memory + * @name: name of the bootloader blob + * @as: AddressSpace to write the bootloader + * @addr: guest address to write it + * @insns: the blob to be loaded + * @fixupcontext: context to be used for any fixups in @insns + * + * Write a bootloader to guest memory at address @addr in the address + * space @as. @name is the name to use for the resulting ROM blob, so + * it should be unique in the system and reasonably identifiable for debugging. + * + * @insns must be an array of ARMInsnFixup structs, each of which has + * one 32-bit value to be written to the guest memory, and a fixup to be + * applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively + * the fixup is optional when writing a struct initializer. + * The final entry in the array must be { 0, FIXUP_TERMINATOR }. + * + * All other supported fixup types have the semantics "ignore insn + * and instead use the value from the array element @fixupcontext[fixup]". + * The caller should therefore provide @fixupcontext as an array of + * size FIXUP_MAX whose elements have been initialized for at least + * the entries that @insns refers to. 
+ */ +void arm_write_bootloader(const char *name, + AddressSpace *as, hwaddr addr, + const ARMInsnFixup *insns, + const uint32_t *fixupcontext); + #endif /* HW_ARM_BOOT_H */ diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index c95333514e..0c76e82626 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -128,9 +128,6 @@ struct PCMachineClass { /* create kvmclock device even when KVM PV features are not exposed */ bool kvmclock_create_always; - - /* skip passing an rng seed for legacy machines */ - bool legacy_no_rng_seed; }; #define TYPE_PC_MACHINE "generic-pc-machine" diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index 62fa5774f8..df82c5fd42 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -126,8 +126,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed); + bool pvh_enabled); bool x86_machine_is_smm_enabled(const X86MachineState *x86ms); bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms); diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 2e503904dc..c1f81a5f13 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -117,28 +117,6 @@ struct FWCfgMemState { */ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len); -/** - * fw_cfg_add_bytes_callback: - * @s: fw_cfg device being modified - * @key: selector key value for new fw_cfg item - * @select_cb: callback function when selecting - * @write_cb: callback function after a write - * @callback_opaque: argument to be passed into callback function - * @data: pointer to start of item data - * @len: size of item data - * @read_only: is file read only - * - * Add a new fw_cfg item, available by selecting the given key, as a raw - * "blob" of the given size. The data referenced by the starting pointer - * is only linked, NOT copied, into the data structure of the fw_cfg device. - */ -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only); - /** * fw_cfg_add_string: * @s: fw_cfg device being modified diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h index a9305c5e6c..a9d3f9b049 100644 --- a/include/hw/virtio/vhost-user-gpio.h +++ b/include/hw/virtio/vhost-user-gpio.h @@ -23,7 +23,7 @@ struct VHostUserGPIO { VirtIODevice parent_obj; CharBackend chardev; struct virtio_gpio_config config; - struct vhost_virtqueue *vhost_vq; + struct vhost_virtqueue *vhost_vqs; struct vhost_dev vhost_dev; VhostUserState vhost_user; VirtQueue *command_vq; diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 333f24ef52..756ec181b6 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -188,4 +188,17 @@ #define QEMU_DISABLE_CFI #endif +/* + * Apple clang version 14 has a bug in its __builtin_subcll(); define + * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it. + * When a version of Apple clang which has this bug fixed is released + * we can add an upper bound to this check. + * See https://gitlab.com/qemu-project/qemu/-/issues/1631 + * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details. + * The bug never made it into any upstream LLVM releases, only Apple ones. 
+ */ +#if defined(__apple_build_version__) && __clang_major__ >= 14 +#define BUILTIN_SUBCLL_BROKEN +#endif + #endif /* COMPILER_H */ diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 88d476161c..b3434ec0bc 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -596,7 +596,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry) */ static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow) { -#if __has_builtin(__builtin_subcll) +#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN) unsigned long long b = *pborrow; x = __builtin_subcll(x, y, b, &b); *pborrow = b & 1; diff --git a/io/channel-tls.c b/io/channel-tls.c index 4ce890a538..4ce08ccc28 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master, ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS)); ioc->master = master; + if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN); + } object_ref(OBJECT(master)); ioc->session = qcrypto_tls_session_new( diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c index 7b25468d02..146aaaafaa 100644 --- a/linux-user/fd-trans.c +++ b/linux-user/fd-trans.c @@ -1622,7 +1622,7 @@ TargetFdTrans target_signalfd_trans = { .host_to_target_data = host_to_target_data_signalfd, }; -static abi_long swap_data_eventfd(void *buf, size_t len) +static abi_long swap_data_u64(void *buf, size_t len) { uint64_t *counter = buf; int i; @@ -1640,8 +1640,12 @@ static abi_long swap_data_eventfd(void *buf, size_t len) } TargetFdTrans target_eventfd_trans = { - .host_to_target_data = swap_data_eventfd, - .target_to_host_data = swap_data_eventfd, + .host_to_target_data = swap_data_u64, + .target_to_host_data = swap_data_u64, +}; + +TargetFdTrans target_timerfd_trans = { + .host_to_target_data = swap_data_u64, }; #if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \ diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h index 1b9fa2041c..910faaf237 100644 --- a/linux-user/fd-trans.h +++ b/linux-user/fd-trans.h @@ -130,6 +130,7 @@ extern TargetFdTrans target_netlink_route_trans; extern TargetFdTrans target_netlink_audit_trans; extern TargetFdTrans target_signalfd_trans; extern TargetFdTrans target_eventfd_trans; +extern TargetFdTrans target_timerfd_trans; #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \ defined(__NR_inotify_init1)) diff --git a/linux-user/generic/target_resource.h b/linux-user/generic/target_resource.h index 539d8c4677..37d3eb09b3 100644 --- a/linux-user/generic/target_resource.h +++ b/linux-user/generic/target_resource.h @@ -12,8 +12,8 @@ struct target_rlimit { }; struct target_rlimit64 { - uint64_t rlim_cur; - uint64_t rlim_max; + abi_ullong rlim_cur; + abi_ullong rlim_max; }; #define TARGET_RLIM_INFINITY ((abi_ulong)-1) diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index d5c1c7941d..8735e58bad 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -290,7 +290,10 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) env->CP0_Status |= (1 << CP0St_FR); env->hflags |= MIPS_HFLAG_F64; } - } else if (!prog_req.fre && !prog_req.frdefault && + } else if (prog_req.fr1) { + env->CP0_Status |= (1 << CP0St_FR); + env->hflags |= MIPS_HFLAG_F64; + } else if (!prog_req.fre && !prog_req.frdefault && 
!prog_req.fr1 && !prog_req.single && !prog_req.soft) { fprintf(stderr, "qemu: Can't find a matching FPU mode\n"); exit(1); diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c index 285bc60071..8b7ac2879e 100644 --- a/linux-user/s390x/cpu_loop.c +++ b/linux-user/s390x/cpu_loop.c @@ -86,6 +86,15 @@ void cpu_loop(CPUS390XState *env) } else if (ret != -QEMU_ESIGRETURN) { env->regs[2] = ret; } + + if (unlikely(cs->singlestep_enabled)) { + /* + * cpu_tb_exec() did not raise EXCP_DEBUG, because it has seen + * that EXCP_SVC was already pending. + */ + cs->exception_index = EXCP_DEBUG; + } + break; case EXCP_DEBUG: diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 24b25759be..cedf22c5b5 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -1755,6 +1755,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, lladdr = (struct target_sockaddr_ll *)addr; lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); lladdr->sll_hatype = tswap16(lladdr->sll_hatype); + } else if (sa_family == AF_INET6) { + struct sockaddr_in6 *in6addr; + + in6addr = (struct sockaddr_in6 *)addr; + in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id); } unlock_user(target_saddr, target_addr, 0); @@ -11433,39 +11438,58 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); - if (!target_grouplist) - return -TARGET_EFAULT; - for(i = 0;i < ret; i++) - target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); - unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } + ret = get_errno(getgroups(gidsetsize, grouplist)); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * sizeof(target_id), 0); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < ret; i++) { + target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); + } + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); + } + return ret; } - return ret; case TARGET_NR_setgroups: { int gidsetsize = arg1; target_id *target_grouplist; - gid_t *grouplist = NULL; + g_autofree gid_t *grouplist = NULL; int i; - if (gidsetsize) { - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); + + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * sizeof(target_id), 1); if (!target_grouplist) { return -TARGET_EFAULT; } for (i = 0; i < gidsetsize; i++) { grouplist[i] = low2highgid(tswapid(target_grouplist[i])); } - unlock_user(target_grouplist, arg2, 0); + unlock_user(target_grouplist, arg2, + gidsetsize * sizeof(target_id)); } return get_errno(setgroups(gidsetsize, grouplist)); } @@ -11750,41 +11774,59 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long 
arg1, { int gidsetsize = arg1; uint32_t *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); + if (gidsetsize > NGROUPS_MAX) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + } ret = get_errno(getgroups(gidsetsize, grouplist)); - if (gidsetsize == 0) - return ret; - if (!is_error(ret)) { - target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); + if (!is_error(ret) && gidsetsize > 0) { + target_grouplist = lock_user(VERIFY_WRITE, arg2, + gidsetsize * 4, 0); if (!target_grouplist) { return -TARGET_EFAULT; } - for(i = 0;i < ret; i++) + for (i = 0; i < ret; i++) { target_grouplist[i] = tswap32(grouplist[i]); + } unlock_user(target_grouplist, arg2, gidsetsize * 4); } + return ret; } - return ret; #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; - gid_t *grouplist; + g_autofree gid_t *grouplist = NULL; int i; - grouplist = alloca(gidsetsize * sizeof(gid_t)); - target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); - if (!target_grouplist) { - return -TARGET_EFAULT; + if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { + return -TARGET_EINVAL; + } + if (gidsetsize > 0) { + grouplist = g_try_new(gid_t, gidsetsize); + if (!grouplist) { + return -TARGET_ENOMEM; + } + target_grouplist = lock_user(VERIFY_READ, arg2, + gidsetsize * 4, 1); + if (!target_grouplist) { + return -TARGET_EFAULT; + } + for (i = 0; i < gidsetsize; i++) { + grouplist[i] = tswap32(target_grouplist[i]); + } + unlock_user(target_grouplist, arg2, 0); } - for(i = 0;i < gidsetsize; i++) - grouplist[i] = tswap32(target_grouplist[i]); - unlock_user(target_grouplist, arg2, 0); return get_errno(setgroups(gidsetsize, grouplist)); } #endif @@ -12883,8 +12925,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { return -TARGET_EFAULT; } - rnew.rlim_cur = tswap64(target_rnew->rlim_cur); - rnew.rlim_max = tswap64(target_rnew->rlim_max); + __get_user(rnew.rlim_cur, &target_rnew->rlim_cur); + __get_user(rnew.rlim_max, &target_rnew->rlim_max); unlock_user_struct(target_rnew, arg3, 0); rnewp = &rnew; } @@ -12894,8 +12936,8 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { return -TARGET_EFAULT; } - target_rold->rlim_cur = tswap64(rold.rlim_cur); - target_rold->rlim_max = tswap64(rold.rlim_max); + __put_user(rold.rlim_cur, &target_rold->rlim_cur); + __put_user(rold.rlim_max, &target_rold->rlim_max); unlock_user_struct(target_rold, arg4, 1); } return ret; @@ -13115,8 +13157,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_create: - return get_errno(timerfd_create(arg1, - target_to_host_bitmask(arg2, fcntl_flags_tbl))); + ret = get_errno(timerfd_create(arg1, + target_to_host_bitmask(arg2, fcntl_flags_tbl))); + if (ret >= 0) { + fd_trans_register(ret, &target_timerfd_trans); + } + return ret; #endif #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) diff --git a/meson.build b/meson.build index 691c3b9047..0ae9c6f8a1 100644 --- a/meson.build +++ b/meson.build @@ -2831,7 +2831,7 @@ config_host_data.set('CONFIG_SLIRP', slirp.found()) genh += configure_file(output: 'config-host.h', 
configuration: config_host_data) hxtool = find_program('scripts/hxtool') -shaderinclude = find_program('scripts/shaderinclude.pl') +shaderinclude = find_program('scripts/shaderinclude.py') qapi_gen = find_program('scripts/qapi-gen.py') qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py', meson.current_source_dir() / 'scripts/qapi/commands.py', @@ -3245,6 +3245,10 @@ modinfo_files = [] block_mods = [] softmmu_mods = [] foreach d, list : modules + if not (d == 'block' ? have_block : have_system) + continue + endif + foreach m, module_ss : list if enable_modules and targetos != 'windows' module_ss = module_ss.apply(config_all, strict: false) diff --git a/migration/migration.c b/migration/migration.c index f485eea5fb..c19fb5cb3e 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -3320,7 +3320,6 @@ static void migration_completion(MigrationState *s) ret = global_state_store(); if (!ret) { - bool inactivate = !migrate_colo_enabled(); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); trace_migration_completion_vm_stop(ret); if (ret >= 0) { @@ -3328,12 +3327,15 @@ static void migration_completion(MigrationState *s) MIGRATION_STATUS_DEVICE); } if (ret >= 0) { + /* + * Inactivate disks except in COLO, and track that we + * have done so in order to remember to reactivate + * them if migration fails or is cancelled. + */ + s->block_inactive = !migrate_colo_enabled(); qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, - inactivate); - } - if (inactivate && ret >= 0) { - s->block_inactive = true; + s->block_inactive); } } qemu_mutex_unlock_iothread(); @@ -3370,13 +3372,13 @@ static void migration_completion(MigrationState *s) rp_error = await_return_path_close_on_source(s); trace_migration_return_path_end_after(rp_error); if (rp_error) { - goto fail_invalidate; + goto fail; } } if (qemu_file_get_error(s->to_dst_file)) { trace_migration_completion_file_err(); - goto fail_invalidate; + goto fail; } if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) { @@ -3390,12 +3392,13 @@ static void migration_completion(MigrationState *s) return; -fail_invalidate: - /* If not doing postcopy, vm_start() will be called: let's regain - * control on images. - */ - if (s->state == MIGRATION_STATUS_ACTIVE || - s->state == MIGRATION_STATUS_DEVICE) { +fail: + if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || + s->state == MIGRATION_STATUS_DEVICE)) { + /* + * If not doing postcopy, vm_start() will be called: let's + * regain control on images. + */ Error *local_err = NULL; qemu_mutex_lock_iothread(); @@ -3408,7 +3411,6 @@ fail_invalidate: qemu_mutex_unlock_iothread(); } -fail: migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_FAILED); } diff --git a/migration/ram.c b/migration/ram.c index 1338e47665..f25ebd9620 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1765,13 +1765,15 @@ out: static inline void populate_read_range(RAMBlock *block, ram_addr_t offset, ram_addr_t size) { + const ram_addr_t end = offset + size; + /* * We read one byte of each page; this will preallocate page tables if * required and populate the shared zeropage on MAP_PRIVATE anonymous memory * where no page was populated yet. This might require adaption when * supporting other mappings, like shmem. 
*/ - for (; offset < size; offset += block->page_size) { + for (; offset < end; offset += block->page_size) { char tmp = *((char *)block->host + offset); /* Don't optimize the read out */ @@ -1885,13 +1887,14 @@ int ram_write_tracking_start(void) block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { goto fail; } + block->flags |= RAM_UF_WRITEPROTECT; + memory_region_ref(block->mr); + /* Apply UFFD write protection to the block memory range */ if (uffd_change_protection(rs->uffdio_fd, block->host, block->max_length, true, false)) { goto fail; } - block->flags |= RAM_UF_WRITEPROTECT; - memory_region_ref(block->mr); trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, block->host, block->max_length); diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 2b4b85d8f8..e533f8a348 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -49,6 +49,7 @@ const int vdpa_feature_bits[] = { VIRTIO_F_VERSION_1, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, VIRTIO_NET_F_GSO, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, @@ -160,6 +161,14 @@ static void vhost_vdpa_cleanup(NetClientState *nc) VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); struct vhost_dev *dev = &s->vhost_net->dev; + /* + * If a peer NIC is attached, do not cleanup anything. + * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup() + * when the guest is shutting down. + */ + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { + return; + } qemu_vfree(s->cvq_cmd_out_buffer); qemu_vfree(s->status); if (dev->vq_index + dev->nvqs == dev->vq_index_end) { @@ -500,7 +509,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, } if (*s->status != VIRTIO_NET_OK) { - return VIRTIO_NET_ERR; + goto out; } status = VIRTIO_NET_ERR; diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img index b2cbb71ee0..0fa3808f16 100644 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build index 06c75e646b..452395b962 100644 --- a/pc-bios/keymaps/meson.build +++ b/pc-bios/keymaps/meson.build @@ -1,5 +1,5 @@ keymaps = { - 'ar': '-l ar', + 'ar': '-l ara', 'bepo': '-l fr -v dvorak', 'cz': '-l cz', 'da': '-l dk', diff --git a/qemu-options.hx b/qemu-options.hx index 7f99d15b23..e52289479b 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1140,10 +1140,22 @@ have gone through several iterations as the feature set and complexity of the block layer have grown. Many online guides to QEMU often reference older and deprecated options, which can lead to confusion. -The recommended modern way to describe disks is to use a combination of +The most explicit way to describe disks is to use a combination of ``-device`` to specify the hardware device and ``-blockdev`` to describe the backend. The device defines what the guest sees and the -backend describes how QEMU handles the data. +backend describes how QEMU handles the data. It is the only guaranteed +stable interface for describing block devices and as such is +recommended for management tools and scripting. + +The ``-drive`` option combines the device and backend into a single +command line option which is more human friendly. There is, however, no +interface stability guarantee, although some older board models still +need updating to work with the modern blockdev forms. + +Older options like ``-hda`` are essentially macros which expand into +``-drive`` options for various drive interfaces. 
The original forms +bake in a lot of assumptions from the days when QEMU was emulating a +legacy PC, so they are not recommended for modern configurations. ERST @@ -1636,6 +1648,14 @@ SRST the raw disk image you use is not written back. You can however force the write back by pressing C-a s (see the :ref:`disk images` chapter in the System Emulation Users Guide). + + .. warning:: + snapshot is incompatible with ``-blockdev`` (instead use qemu-img + to manually create snapshot images to attach to your blockdev). + If you have mixed ``-blockdev`` and ``-drive`` declarations you + can use the 'snapshot' property on your drive declarations + instead of this global option. + ERST DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 32493d6383..182eba4a38 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -1925,10 +1925,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (systemd_supports_mode(mode, &local_err)) { mode_supported = true; systemd_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } error_free(local_err); @@ -1937,10 +1937,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (pmutils_supports_mode(mode, &local_err)) { mode_supported = true; pmutils_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } error_free(local_err); diff --git a/qga/commands.c b/qga/commands.c index 7ff551d092..6cf978322e 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -32,9 +32,8 @@ #define GUEST_FILE_READ_COUNT_MAX (48 * MiB) /* Note: in some situations, like with the fsfreeze, logging may be - * temporarilly disabled. if it is necessary that a command be able - * to log for accounting purposes, check ga_logging_enabled() beforehand, - * and use the QERR_QGA_LOGGING_DISABLED to generate an error + * temporarily disabled. If it is necessary that a command be able + * to log for accounting purposes, check ga_logging_enabled() beforehand. */ void slog(const gchar *fmt, ...) 
{ diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index 813d1c6ca6..3442383627 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -31,6 +31,7 @@ /> + diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index b57508fbe0..b8087e5baa 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -518,7 +518,7 @@ namespace _com_util /* Stop QGA VSS provider service using Winsvc API */ STDAPI StopService(void) { - HRESULT hr; + HRESULT hr = S_OK; SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); SC_HANDLE service = NULL; diff --git a/roms/seabios-hppa b/roms/seabios-hppa index 458626c4c6..673d2595d4 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit 458626c4c6441045c0612f24313c7cf1f95e71c6 +Subproject commit 673d2595d4f773cc266cbf8dbaf2f475a6adb949 diff --git a/scripts/device-crash-test b/scripts/device-crash-test index 73bcb98693..b74d887331 100755 --- a/scripts/device-crash-test +++ b/scripts/device-crash-test @@ -397,7 +397,7 @@ def binariesToTest(args, testcase): def accelsToTest(args, testcase): - if getBinaryInfo(args, testcase['binary']).kvm_available: + if getBinaryInfo(args, testcase['binary']).kvm_available and not args.tcg_only: yield 'kvm' yield 'tcg' @@ -510,6 +510,8 @@ def main(): help="Full mode: test cases that are expected to fail") parser.add_argument('--strict', action='store_true', dest='strict', help="Treat all warnings as fatal") + parser.add_argument('--tcg-only', action='store_true', dest='tcg_only', + help="Only test with TCG accelerator") parser.add_argument('qemu', nargs='*', metavar='QEMU', help='QEMU binary to run') args = parser.parse_args() diff --git a/scripts/shaderinclude.pl b/scripts/shaderinclude.pl deleted file mode 100644 index cd3bb40b12..0000000000 --- a/scripts/shaderinclude.pl +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env perl -use strict; -use warnings; - -my $file = shift; -open FILE, "<", $file or die "open $file: $!"; -my $name = $file; -$name =~ s|.*/||; -$name =~ s/[-.]/_/g; -print "static GLchar ${name}_src[] =\n"; -while (<FILE>) { - chomp; - printf " \"%s\\n\"\n", $_; -} -print " \"\\n\";\n"; -close FILE; diff --git a/scripts/shaderinclude.py b/scripts/shaderinclude.py new file mode 100644 index 0000000000..ab2aade2cd --- /dev/null +++ b/scripts/shaderinclude.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2023 Red Hat, Inc. 
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +import os + + +def main(args): + file_path = args[1] + basename = os.path.basename(file_path) + varname = basename.replace('-', '_').replace('.', '_') + + with os.fdopen(sys.stdout.fileno(), "wt", closefd=False, newline='\n') as stdout: + with open(file_path, "r", encoding='utf-8') as file: + print(f'static GLchar {varname}_src[] =', file=stdout) + for line in file: + line = line.rstrip() + print(f' "{line}\\n"', file=stdout) + print(' "\\n";', file=stdout) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/scripts/symlink-install-tree.py b/scripts/symlink-install-tree.py index a5bf0b0d6d..67cb86dd52 100644 --- a/scripts/symlink-install-tree.py +++ b/scripts/symlink-install-tree.py @@ -17,7 +17,6 @@ def destdir_join(d1: str, d2: str) -> str: out = subprocess.run([*introspect.split(' '), '--installed'], stdout=subprocess.PIPE, check=True).stdout for source, dest in json.loads(out).items(): - assert os.path.isabs(source) bundle_dest = destdir_join('qemu-bundle', dest) path = os.path.dirname(bundle_dest) try: diff --git a/softmmu/icount.c b/softmmu/icount.c index 4504433e16..a5cef9c60a 100644 --- a/softmmu/icount.c +++ b/softmmu/icount.c @@ -259,11 +259,16 @@ static void icount_warp_rt(void) warp_delta = clock - timers_state.vm_clock_warp_start; if (icount_enabled() == 2) { /* - * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too - * far ahead of real time. + * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far + * ahead of real time (it might already be ahead so careful not + * to go backwards). */ int64_t cur_icount = icount_get_locked(); int64_t delta = clock - cur_icount; + + if (delta < 0) { + delta = 0; + } warp_delta = MIN(warp_delta, delta); } qatomic_set_i64(&timers_state.qemu_icount_bias, diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index d6ee6e7d91..b17e82b2b0 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -2547,6 +2547,10 @@ virtqueue_alloc_element(size_t sz, assert(sz >= sizeof(VuVirtqElement)); elem = malloc(out_sg_end); + if (!elem) { + DPRINT("%s: failed to malloc virtqueue element\n", __func__); + return NULL; + } elem->out_num = out_num; elem->in_num = in_num; elem->in_sg = (void *)elem + in_sg_ofs; @@ -2633,6 +2637,9 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); + if (!elem) { + return NULL; + } elem->index = idx; for (i = 0; i < out_num; i++) { elem->out_sg[i] = iov[i]; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 9aeed3c848..a9cd7178f8 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -2407,6 +2407,9 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env) /* Return true if the processor is in secure state */ static inline bool arm_is_secure(CPUARMState *env) { + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.secure; + } if (arm_is_el3_or_mon(env)) { return true; } diff --git a/target/arm/helper.c b/target/arm/helper.c index d8c8223ec3..22bc935242 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1820,6 +1820,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) if (cpu_isar_feature(aa64_sme, cpu)) { valid_mask |= SCR_ENTP2; } + if (cpu_isar_feature(aa64_hcx, cpu)) { + valid_mask |= SCR_HXEN; + } } else { valid_mask &= ~(SCR_RW | SCR_ST); if 
(cpu_isar_feature(aa32_ras, cpu)) { diff --git a/target/arm/kvm.c b/target/arm/kvm.c index f022c644d2..84da49332c 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -280,6 +280,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + kvm_arm_init_debug(s); + return ret; } diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 1197253d12..810db33ccb 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -74,24 +74,16 @@ GArray *hw_breakpoints, *hw_watchpoints; #define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) #define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) -/** - * kvm_arm_init_debug() - check for guest debug capabilities - * @cs: CPUState - * - * kvm_check_extension returns the number of debug registers we have - * or 0 if we have none. - * - */ -static void kvm_arm_init_debug(CPUState *cs) +void kvm_arm_init_debug(KVMState *s) { - have_guest_debug = kvm_check_extension(cs->kvm_state, + have_guest_debug = kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG); - max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); hw_watchpoints = g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps); - max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS); + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); hw_breakpoints = g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps); return; @@ -920,8 +912,6 @@ int kvm_arch_init_vcpu(CPUState *cs) } cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - kvm_arm_init_debug(cs); - /* Check whether user space can specify guest syndrome value */ kvm_arm_init_serror_injection(cs); diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 99017b635c..330fbe5c72 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -18,6 +18,14 @@ #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) +/** + * kvm_arm_init_debug() - initialize guest debug capabilities + * @s: KVMState + * + * Should be called only once before using guest debug capabilities. + */ +void kvm_arm_init_debug(KVMState *s); + /** * kvm_arm_vcpu_init: * @cs: CPUState diff --git a/target/arm/ptw.c b/target/arm/ptw.c index f812734bfb..0b16068557 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -238,8 +238,8 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, }; GetPhysAddrResult s2 = { }; - if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD, - false, &s2, fi)) { + if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD, + false, &s2, fi)) { goto fail; } ptw->out_phys = s2.f.phys_addr; @@ -266,7 +266,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, if (unlikely(flags & TLB_INVALID_MASK)) { goto fail; } - ptw->out_phys = full->phys_addr; + ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); ptw->out_rw = full->prot & PAGE_WRITE; pte_attrs = full->pte_attrs; pte_secure = full->attrs.secure; diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c index 1afeadf9c8..27838fb6e2 100644 --- a/target/arm/sve_helper.c +++ b/target/arm/sve_helper.c @@ -5354,15 +5354,10 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, #ifdef CONFIG_USER_ONLY flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault, &info->host, retaddr); - memset(&info->attrs, 0, sizeof(info->attrs)); - /* Require both ANON and MTE; see allocation_tag_mem(). 
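A minimal sketch (not part of the patch) of the address arithmetic behind the S1_ptw_translate() change above: a CPUTLBEntryFull entry only records the page-aligned physical address, so the in-page offset of the descriptor address must be merged back in. Assuming 4 KiB pages purely for illustration:

#include <stdint.h>

#define EX_PAGE_MASK (~(uint64_t)0xfff)   /* hypothetical 4 KiB page mask */

static inline uint64_t descriptor_phys(uint64_t page_phys, uint64_t vaddr)
{
    /* page_phys is page aligned; vaddr supplies the offset within the page */
    return (page_phys & EX_PAGE_MASK) | (vaddr & ~EX_PAGE_MASK);
}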
*/ - info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE); #else CPUTLBEntryFull *full; flags = probe_access_full(env, addr, access_type, mmu_idx, nofault, &info->host, &full, retaddr); - info->attrs = full->attrs; - info->tagged = full->pte_attrs == 0xf0; #endif info->flags = flags; @@ -5371,6 +5366,15 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, return false; } +#ifdef CONFIG_USER_ONLY + memset(&info->attrs, 0, sizeof(info->attrs)); + /* Require both ANON and MTE; see allocation_tag_mem(). */ + info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE); +#else + info->attrs = full->attrs; + info->tagged = full->pte_attrs == 0xf0; +#endif + /* Ensure that info->host[] is relative to addr, not addr + mem_off. */ info->host -= mem_off; return true; @@ -6722,6 +6726,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, intptr_t reg_off; SVEHostPage info; target_ulong addr, in_page; + ARMVectorReg scratch; /* Skip to the first true predicate. */ reg_off = find_next_active(vg, 0, reg_max, esz); @@ -6731,6 +6736,11 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, return; } + /* Protect against overlap between vd and vm. */ + if (unlikely(vd == vm)) { + vm = memcpy(&scratch, vm, reg_max); + } + /* * Probe the first element, allowing faults. */ diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c index 0f4f4fc809..1384fe6f98 100644 --- a/target/arm/tlb_helper.c +++ b/target/arm/tlb_helper.c @@ -82,8 +82,17 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi, ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); uint32_t fsr, fsc; - if (target_el == 2 || arm_el_is_aa64(env, target_el) || - arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { + /* + * For M-profile there is no guest-facing FSR. We compute a + * short-form value for env->exception.fsr which we will then + * examine in arm_v7m_cpu_do_interrupt(). In theory we could + * use the LPAE format instead as long as both bits of code agree + * (and arm_fi_to_lfsc() handled the M-profile specific + * ARMFault_QEMU_NSCExec and ARMFault_QEMU_SFault cases). 
+ */ + if (!arm_feature(env, ARM_FEATURE_M) && + (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, arm_mmu_idx))) { /* * LPAE format fault status register : bottom 6 bits are * status code in the same form as needed for syndrome diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h index 5339c22f1e..99eea85fa8 100644 --- a/target/arm/translate-a32.h +++ b/target/arm/translate-a32.h @@ -61,6 +61,13 @@ static inline TCGv_i32 load_cpu_offset(int offset) #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name)) +/* Load from the low half of a 64-bit field to a TCGv_i32 */ +#define load_cpu_field_low32(name) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \ + load_cpu_offset(offsetoflow32(CPUARMState, name)); \ + }) + void store_cpu_offset(TCGv_i32 var, int offset, int size); #define store_cpu_field(var, name) \ diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 2ee171f249..f0b8db7ce5 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -3536,8 +3536,22 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, */ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); - if ((mop & MO_SIGN) && size != MO_64) { - tcg_gen_ext32u_i64(tcg_rt, tcg_rt); + if (mop & MO_SIGN) { + switch (size) { + case MO_8: + tcg_gen_ext8u_i64(tcg_rt, tcg_rt); + break; + case MO_16: + tcg_gen_ext16u_i64(tcg_rt, tcg_rt); + break; + case MO_32: + tcg_gen_ext32u_i64(tcg_rt, tcg_rt); + break; + case MO_64: + break; + default: + g_assert_not_reached(); + } } } @@ -4176,9 +4190,13 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn) if (s->ata) { gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt); } else { + /* + * Tag access disabled: we must check for aborts on the load + * load from [rn+offset], and then insert a 0 tag into rt. + */ clean_addr = clean_data_tbi(s, addr); gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8); - gen_address_with_allocation_tag0(tcg_rt, addr); + gen_address_with_allocation_tag0(tcg_rt, tcg_rt); } } else { tcg_rt = cpu_reg_sp(s, rt); diff --git a/target/arm/translate.c b/target/arm/translate.c index 74a903072f..a06da05640 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1184,7 +1184,7 @@ static inline void gen_hlt(DisasContext *s, int imm) * semihosting, to provide some semblance of security * (and for consistency with our 32-bit semihosting). */ - if (semihosting_enabled(s->current_el != 0) && + if (semihosting_enabled(s->current_el == 0) && (imm == (s->thumb ? 
0x3c : 0xf000))) { gen_exception_internal_insn(s, EXCP_SEMIHOST); return; @@ -2886,7 +2886,7 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, if (arm_dc_feature(s, ARM_FEATURE_AARCH64) && dc_isar_feature(aa64_sel2, s)) { /* Target EL is EL<3 minus SCR_EL3.EEL2> */ - tcg_el = load_cpu_field(cp15.scr_el3); + tcg_el = load_cpu_field_low32(cp15.scr_el3); tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1); tcg_gen_addi_i32(tcg_el, tcg_el, 3); } else { @@ -6558,7 +6558,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) } if (s->current_el == 2) { /* ERET from Hyp uses ELR_Hyp, not LR */ - tmp = load_cpu_field(elr_el[2]); + tmp = load_cpu_field_low32(elr_el[2]); } else { tmp = load_reg(s, 14); } diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 2b2fb07c3a..2330a8d902 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -5624,8 +5624,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } else { *eax &= env->features[FEAT_SGX_12_1_EAX]; *ebx &= 0; /* ebx reserve */ - *ecx &= env->features[FEAT_XSAVE_XSS_LO]; - *edx &= env->features[FEAT_XSAVE_XSS_HI]; + *ecx &= env->features[FEAT_XSAVE_XCR0_LO]; + *edx &= env->features[FEAT_XSAVE_XCR0_HI]; /* FP and SSE are always allowed regardless of XSAVE/XCR0. */ *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK; diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 3cbc36a59d..44c1e70093 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -2493,6 +2493,14 @@ void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order) d->Q(1) = r1; d->Q(2) = r2; d->Q(3) = r3; + if (order & 0x8) { + d->Q(0) = 0; + d->Q(1) = 0; + } + if (order & 0x80) { + d->Q(2) = 0; + d->Q(3) = 0; + } } void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order) diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 99abc5886a..b1f9494aaa 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -1015,6 +1015,7 @@ VSIB_AVX(VPGATHERQ, vpgatherq) static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) { + int opposite_cc_op; TCGv carry_in = NULL; TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2); TCGv zero; @@ -1022,14 +1023,8 @@ static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) { /* Re-use the carry-out from a previous round. */ carry_in = carry_out; - cc_op = s->cc_op; - } else if (s->cc_op == CC_OP_ADCX || s->cc_op == CC_OP_ADOX) { - /* Merge with the carry-out from the opposite instruction. */ - cc_op = CC_OP_ADCOX; - } - - /* If we don't have a carry-in, get it out of EFLAGS. */ - if (!carry_in) { + } else { + /* We don't have a carry-in, get it out of EFLAGS. */ if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { gen_compute_eflags(s); } @@ -1053,7 +1048,14 @@ static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero); break; } - set_cc_op(s, cc_op); + + opposite_cc_op = cc_op == CC_OP_ADCX ? CC_OP_ADOX : CC_OP_ADCX; + if (s->cc_op == CC_OP_ADCOX || s->cc_op == opposite_cc_op) { + /* Merge with the carry-out from the opposite instruction. 
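For reference (not part of the patch), a rough C model of the behaviour gen_ADCOX() is emitting: ADCX is an add-with-carry that reads and writes only CF, while ADOX does the same with OF, so the two carry chains stay independent and can be interleaved, which is why the carry-out of one round can be reused or merged as above.

#include <stdint.h>

/* Sketch only: 64-bit ADCX reference; ADOX is identical with OF in place of
 * CF.  All other arithmetic flags are left untouched by both instructions.
 * Uses the GCC/Clang __int128 extension for the 65-bit sum. */
static uint64_t ref_adcx64(uint64_t dst, uint64_t src, unsigned *cf)
{
    unsigned __int128 sum = (unsigned __int128)dst + src + *cf;

    *cf = (unsigned)(sum >> 64);
    return (uint64_t)sum;
}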
*/ + set_cc_op(s, CC_OP_ADCOX); + } else { + set_cc_op(s, cc_op); + } } static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1078,30 +1080,30 @@ static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - TCGv bound, zero; + TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + TCGv zero = tcg_constant_tl(0); + TCGv mone = tcg_constant_tl(-1); /* * Extract START, and shift the operand. * Shifts larger than operand size get zeros. */ tcg_gen_ext8u_tl(s->A0, s->T1); + if (TARGET_LONG_BITS == 64 && ot == MO_32) { + tcg_gen_ext32u_tl(s->T0, s->T0); + } tcg_gen_shr_tl(s->T0, s->T0, s->A0); - bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); - zero = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero); /* - * Extract the LEN into a mask. Lengths larger than - * operand size get all ones. + * Extract the LEN into an inverse mask. Lengths larger than + * operand size get all zeros, length 0 gets all ones. */ tcg_gen_extract_tl(s->A0, s->T1, 8, 8); - tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound); - - tcg_gen_movi_tl(s->T1, 1); - tcg_gen_shl_tl(s->T1, s->T1, s->A0); - tcg_gen_subi_tl(s->T1, s->T1, 1); - tcg_gen_and_tl(s->T0, s->T0, s->T1); + tcg_gen_shl_tl(s->T1, mone, s->A0); + tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero); + tcg_gen_andc_tl(s->T0, s->T0, s->T1); gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); @@ -1111,6 +1113,7 @@ static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; + tcg_gen_mov_tl(cpu_cc_src, s->T0); tcg_gen_neg_tl(s->T1, s->T0); tcg_gen_and_tl(s->T0, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, s->T0); @@ -1121,6 +1124,7 @@ static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode { MemOp ot = decode->op[0].ot; + tcg_gen_mov_tl(cpu_cc_src, s->T0); tcg_gen_subi_tl(s->T1, s->T0, 1); tcg_gen_xor_tl(s->T0, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, s->T0); @@ -1131,6 +1135,7 @@ static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; + tcg_gen_mov_tl(cpu_cc_src, s->T0); tcg_gen_subi_tl(s->T1, s->T0, 1); tcg_gen_and_tl(s->T0, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, s->T0); @@ -1140,20 +1145,20 @@ static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - TCGv bound; + TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + TCGv zero = tcg_constant_tl(0); + TCGv mone = tcg_constant_tl(-1); - tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]); - bound = tcg_constant_tl(ot == MO_64 ? 63 : 31); + tcg_gen_ext8u_tl(s->T1, s->T1); /* * Note that since we're using BMILG (in order to get O * cleared) we need to store the inverse into C. 
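As a point of reference (not part of the patch), the architectural result and CF of BZHI can be modelled as below; the translation above arranges the same outcome with a shift of -1 and movcond instead of a branch.

#include <stdint.h>

/* Sketch only: 64-bit BZHI keeps the low N bits of SRC, where N is the low
 * byte of the index operand; N >= 64 keeps everything and sets CF. */
static uint64_t ref_bzhi64(uint64_t src, uint64_t idx, int *cf)
{
    unsigned n = idx & 0xff;

    *cf = n > 63;
    return *cf ? src : src & ~(~(uint64_t)0 << n);
}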
*/ - tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, s->T1, bound); - tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1); + tcg_gen_setcond_tl(TCG_COND_LEU, cpu_cc_src, s->T1, bound); - tcg_gen_movi_tl(s->A0, -1); - tcg_gen_shl_tl(s->A0, s->A0, s->T1); + tcg_gen_shl_tl(s->A0, mone, s->T1); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); tcg_gen_andc_tl(s->T0, s->T0, s->A0); gen_op_update1_cc(s); @@ -2283,7 +2288,7 @@ static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco { TCGv_ptr ptr = tcg_temp_new_ptr(); - tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0)); + tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs)); gen_helper_memset(ptr, ptr, tcg_constant_i32(0), tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg))); tcg_temp_free_ptr(ptr); diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index cbf0081374..294a18a5b7 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -7085,7 +7085,7 @@ static void ppc_cpu_reset(DeviceState *dev) if (env->mmu_model != POWERPC_MMU_REAL) { ppc_tlb_invalidate_all(env); } - pmu_update_summaries(env); + pmu_mmcr01_updated(env); } /* clean any pending stop state */ diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 94adcb766b..6cf88f635a 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -1358,9 +1358,12 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) /* * We don't want to generate a Hypervisor Emulation Assistance - * Interrupt if we don't have HVB in msr_mask (PAPR mode). + * Interrupt if we don't have HVB in msr_mask (PAPR mode), + * unless running a nested-hv guest, in which case the L1 + * kernel wants the interrupt. */ - if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB)) { + if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) && + !books_vhyp_handles_hv_excp(cpu)) { excp = POWERPC_EXCP_PROGRAM; } @@ -2631,7 +2634,7 @@ void helper_scv(CPUPPCState *env, uint32_t lev) } } -void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn) +void helper_pminsn(CPUPPCState *env, uint32_t insn) { CPUState *cs; diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index c0aee5855b..e200091a23 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -46,6 +46,48 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env) env->tgpr[3] = tmp; } +static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) +{ + uint32_t hflags = 0; + +#if defined(TARGET_PPC64) + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) { + hflags |= 1 << HFLAGS_PMCC0; + } + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) { + hflags |= 1 << HFLAGS_PMCC1; + } + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) { + hflags |= 1 << HFLAGS_PMCJCE; + } + +#ifndef CONFIG_USER_ONLY + if (env->pmc_ins_cnt) { + hflags |= 1 << HFLAGS_INSN_CNT; + } + if (env->pmc_ins_cnt & 0x1e) { + hflags |= 1 << HFLAGS_PMC_OTHER; + } +#endif +#endif + + return hflags; +} + +/* Mask of all PMU hflags */ +static uint32_t hreg_compute_pmu_hflags_mask(CPUPPCState *env) +{ + uint32_t hflags_mask = 0; +#if defined(TARGET_PPC64) + hflags_mask |= 1 << HFLAGS_PMCC0; + hflags_mask |= 1 << HFLAGS_PMCC1; + hflags_mask |= 1 << HFLAGS_PMCJCE; + hflags_mask |= 1 << HFLAGS_INSN_CNT; + hflags_mask |= 1 << HFLAGS_PMC_OTHER; +#endif + return hflags_mask; +} + static uint32_t hreg_compute_hflags_value(CPUPPCState *env) { target_ulong msr = env->msr; @@ -103,30 +145,12 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) if (env->spr[SPR_LPCR] & LPCR_HR) { 
hflags |= 1 << HFLAGS_HR; } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) { - hflags |= 1 << HFLAGS_PMCC0; - } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) { - hflags |= 1 << HFLAGS_PMCC1; - } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) { - hflags |= 1 << HFLAGS_PMCJCE; - } #ifndef CONFIG_USER_ONLY if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) { hflags |= 1 << HFLAGS_HV; } -#if defined(TARGET_PPC64) - if (env->pmc_ins_cnt) { - hflags |= 1 << HFLAGS_INSN_CNT; - } - if (env->pmc_ins_cnt & 0x1e) { - hflags |= 1 << HFLAGS_PMC_OTHER; - } -#endif - /* * This is our encoding for server processors. The architecture * specifies that there is no such thing as userspace with @@ -171,6 +195,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) hflags |= dmmu_idx << HFLAGS_DMMU_IDX; #endif + hflags |= hreg_compute_pmu_hflags_value(env); + return hflags | (msr & msr_mask); } @@ -179,6 +205,17 @@ void hreg_compute_hflags(CPUPPCState *env) env->hflags = hreg_compute_hflags_value(env); } +/* + * This can be used as a lighter-weight alternative to hreg_compute_hflags + * when PMU MMCR0 or pmc_ins_cnt changes. pmc_ins_cnt is changed by + * pmu_update_summaries. + */ +void hreg_update_pmu_hflags(CPUPPCState *env) +{ + env->hflags &= ~hreg_compute_pmu_hflags_mask(env); + env->hflags |= hreg_compute_pmu_hflags_value(env); +} + #ifdef CONFIG_DEBUG_TCG void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h index 42f26870b9..8196c1346d 100644 --- a/target/ppc/helper_regs.h +++ b/target/ppc/helper_regs.h @@ -22,6 +22,7 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env); void hreg_compute_hflags(CPUPPCState *env); +void hreg_update_pmu_hflags(CPUPPCState *env); void cpu_interrupt_exittb(CPUState *cs); int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv); diff --git a/target/ppc/machine.c b/target/ppc/machine.c index be6eb3d968..134b16c625 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -21,10 +21,6 @@ static void post_load_update_msr(CPUPPCState *env) */ env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); ppc_store_msr(env, msr); - - if (tcg_enabled()) { - pmu_update_summaries(env); - } } static int get_avr(QEMUFile *f, void *pv, size_t size, @@ -317,6 +313,10 @@ static int cpu_post_load(void *opaque, int version_id) post_load_update_msr(env); + if (tcg_enabled()) { + pmu_mmcr01_updated(env); + } + return 0; } diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c index 1381072b9e..fccd011088 100644 --- a/target/ppc/power8-pmu.c +++ b/target/ppc/power8-pmu.c @@ -31,7 +31,11 @@ static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn) return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE; } -void pmu_update_summaries(CPUPPCState *env) +/* + * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt. + * hflags must subsequently be updated. 
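The new hreg_update_pmu_hflags() boils down to a clear-then-set update of just the PMU-owned bits; a generic sketch of that pattern (not part of the patch):

#include <stdint.h>

/* Sketch only: recompute the flag bits a subsystem owns without disturbing
 * any other bits in the flags word. */
static inline uint32_t update_flag_subset(uint32_t flags, uint32_t owned_mask,
                                          uint32_t new_bits)
{
    return (flags & ~owned_mask) | (new_bits & owned_mask);
}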
+ */ +static void pmu_update_summaries(CPUPPCState *env) { target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0]; target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1]; @@ -39,7 +43,7 @@ void pmu_update_summaries(CPUPPCState *env) int cyc_cnt = 0; if (mmcr0 & MMCR0_FC) { - goto hflags_calc; + goto out; } if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) { @@ -73,10 +77,19 @@ void pmu_update_summaries(CPUPPCState *env) ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5; cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6; - hflags_calc: + out: env->pmc_ins_cnt = ins_cnt; env->pmc_cyc_cnt = cyc_cnt; - env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0); +} + +void pmu_mmcr01_updated(CPUPPCState *env) +{ + pmu_update_summaries(env); + hreg_update_pmu_hflags(env); + /* + * Should this update overflow timers (if mmcr0 is updated) so they + * get set in cpu_post_load? + */ } static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns) @@ -234,18 +247,11 @@ static void pmu_delete_timers(CPUPPCState *env) void helper_store_mmcr0(CPUPPCState *env, target_ulong value) { - bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0; - bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0; - pmu_update_cycles(env); env->spr[SPR_POWER_MMCR0] = value; - /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */ - env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0); - env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1); - - pmu_update_summaries(env); + pmu_mmcr01_updated(env); /* Update cycle overflow timers with the current MMCR0 state */ pmu_update_overflow_timers(env); @@ -257,8 +263,7 @@ void helper_store_mmcr1(CPUPPCState *env, uint64_t value) env->spr[SPR_POWER_MMCR1] = value; - /* MMCR1 writes can change HFLAGS_INSN_CNT */ - pmu_update_summaries(env); + pmu_mmcr01_updated(env); } target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn) @@ -287,8 +292,8 @@ static void fire_PMC_interrupt(PowerPCCPU *cpu) env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE; env->spr[SPR_POWER_MMCR0] |= MMCR0_FC; - /* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */ - pmu_update_summaries(env); + /* Changing MMCR0_FC requires summaries and hflags update */ + pmu_mmcr01_updated(env); /* * Delete all pending timers if we need to freeze @@ -299,6 +304,7 @@ static void fire_PMC_interrupt(PowerPCCPU *cpu) } if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) { + /* These MMCR0 bits do not require summaries or hflags update. 
*/ env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE; env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO; } diff --git a/target/ppc/power8-pmu.h b/target/ppc/power8-pmu.h index c0093e2219..775e640053 100644 --- a/target/ppc/power8-pmu.h +++ b/target/ppc/power8-pmu.h @@ -18,10 +18,10 @@ #define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL void cpu_ppc_pmu_init(CPUPPCState *env); -void pmu_update_summaries(CPUPPCState *env); +void pmu_mmcr01_updated(CPUPPCState *env); #else static inline void cpu_ppc_pmu_init(CPUPPCState *env) { } -static inline void pmu_update_summaries(CPUPPCState *env) { } +static inline void pmu_mmcr01_updated(CPUPPCState *env) { } #endif #endif diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 19c1d17cb0..1de7eca9c4 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -3972,6 +3972,7 @@ static void gen_lqarx(DisasContext *ctx) } tcg_temp_free(EA); + tcg_gen_mov_tl(cpu_reserve, EA); tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2)); } diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 7741f2eb49..764b76dcc6 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -2231,7 +2231,7 @@ static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a) static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) { const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece, - mask = dup_const(vece, 1 << (elem_width - 1)); + mask = dup_const(vece, 1ULL << (elem_width - 1)); uint64_t i, j; TCGv_i64 lo, hi, t0, t1; diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index 3281408a87..74e2894462 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -77,6 +77,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) #ifndef CONFIG_USER_ONLY if (has_ext(ctx, RVS)) { decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } gen_helper_sret(cpu_pc, cpu_env); tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; @@ -93,6 +96,9 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) { #ifndef CONFIG_USER_ONLY decode_save_opc(ctx); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(); + } gen_helper_mret(cpu_pc, cpu_env); tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; diff --git a/target/riscv/translate.c b/target/riscv/translate.c index db123da5ec..1ed4bb5ec3 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1064,14 +1064,10 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) /* Check for compressed insn */ if (insn_len(opcode) == 2) { - if (!has_ext(ctx, RVC)) { - gen_exception_illegal(ctx); - } else { - ctx->opcode = opcode; - ctx->pc_succ_insn = ctx->base.pc_next + 2; - if (decode_insn16(ctx, opcode)) { - return; - } + ctx->opcode = opcode; + ctx->pc_succ_insn = ctx->base.pc_next + 2; + if (has_ext(ctx, RVC) && decode_insn16(ctx, opcode)) { + return; } } else { uint32_t opcode32 = opcode; diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c index a2329141e8..a7c44ba49d 100644 --- a/target/s390x/arch_dump.c +++ b/target/s390x/arch_dump.c @@ -248,7 +248,7 @@ static int s390x_write_elf64_notes(const char *note_name, notep = g_malloc(note_size); } - memset(notep, 0, sizeof(note)); + memset(notep, 0, note_size); /* Setup note 
header data */ notep->hdr.n_descsz = cpu_to_be32(content_size); diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 7d6d01325b..8aaf8dd5a3 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -87,6 +87,7 @@ struct CPUArchState { uint64_t cc_vr; uint64_t ex_value; + uint64_t ex_target; uint64_t __excp_addr; uint64_t psa; diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index c3a4f80633..a85c56b4ee 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -604,8 +604,8 @@ void s390_realize_cpu_model(CPUState *cs, Error **errp) #if !defined(CONFIG_USER_ONLY) cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model); if (tcg_enabled()) { - /* basic mode, write the cpu address into the first 4 bit of the ID */ - cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id); + cpu->env.cpuid = deposit64(cpu->env.cpuid, CPU_PHYS_ADDR_SHIFT, + CPU_PHYS_ADDR_BITS, cpu->env.core_id); } #endif } diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h index fb1adc8b21..cc7305ec21 100644 --- a/target/s390x/cpu_models.h +++ b/target/s390x/cpu_models.h @@ -96,10 +96,18 @@ static inline bool s390_known_cpu_type(uint16_t type) { return s390_get_gen_for_cpu_type(type) != 0; } +#define CPU_ID_SHIFT 32 +#define CPU_ID_BITS 24 +/* + * When cpu_id_format is 0 (basic mode), the leftmost 4 bits of cpu_id contain + * the rightmost 4 bits of the physical CPU address. + */ +#define CPU_PHYS_ADDR_BITS 4 +#define CPU_PHYS_ADDR_SHIFT (CPU_ID_SHIFT + CPU_ID_BITS - CPU_PHYS_ADDR_BITS) static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model) { return ((uint64_t)model->cpu_ver << 56) | - ((uint64_t)model->cpu_id << 32) | + ((uint64_t)model->cpu_id << CPU_ID_SHIFT) | ((uint64_t)model->def->type << 16) | (model->def->gen == 7 ? 
0 : (uint64_t)model->cpu_id_format << 15); } diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 5d4361d35b..825252d728 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -11,6 +11,7 @@ #define S390X_INTERNAL_H #include "cpu.h" +#include "fpu/softfloat.h" #ifndef CONFIG_USER_ONLY typedef struct LowCore { @@ -299,7 +300,7 @@ uint32_t set_cc_nz_f128(float128 v); uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); -int float_comp_to_cc(CPUS390XState *env, int float_compare); +int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare); #define DCMASK_ZERO 0x0c00 #define DCMASK_NORMAL 0x0300 diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index 54d4250c9f..4249632af3 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -199,8 +199,8 @@ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) /* COMPARE HALFWORD RELATIVE LONG */ - C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) - C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) + C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps32) + C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps64) /* COMPARE HIGH */ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) @@ -486,7 +486,7 @@ F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) /* LOAD COUNT TO BLOCK BOUNDARY */ - C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) + C(0xe727, LCBB, RXE, V, la2, 0, new, r1_32, lcbb, 0) /* LOAD HALFWORD */ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) @@ -564,7 +564,7 @@ C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) /* LOAD HIGH ON CONDITION */ - C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) + C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2_sr32, new, r1_32h, loc, 0) C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) /* LOAD PAIR DISJOINT */ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) @@ -606,7 +606,7 @@ F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) - F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) + F(0xb324, LDER, RRE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) /* LOAD ROUNDED */ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 3758b9e688..7e7de5e2f1 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -2618,6 +2618,7 @@ void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) that ex_value is non-zero, which flags that we are in a state that requires such execution. 
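The new ex_target field records the address of the instruction designated by EXECUTE, so that a relative branch executed this way resolves against that address rather than against the EXECUTE instruction itself. A sketch of the intended target computation (not part of the patch; the names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: RI is a signed halfword offset from the relative-branch
 * instruction's own address, which for an EXECUTE'd branch is the saved
 * EXECUTE target address. */
static uint64_t rel_branch_dest(bool in_execute, uint64_t insn_addr,
                                uint64_t ex_target, int32_t ri)
{
    uint64_t base = in_execute ? ex_target : insn_addr;

    return base + (int64_t)ri * 2;
}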
*/ env->ex_value = insn | ilen; + env->ex_target = addr; } uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 1e599ac259..0885bf2641 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -1585,18 +1585,51 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o) } } +/* + * Disassemble the target of a branch. The results are returned in a form + * suitable for passing into help_branch(): + * + * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd + * branches, whose DisasContext *S contains the relative immediate field RI, + * are considered fixed. All the other branches are considered computed. + * - int IMM is the value of RI. + * - TCGv_i64 CDEST is the address of the computed target. + */ +#define disas_jdest(s, ri, is_imm, imm, cdest) do { \ + if (have_field(s, ri)) { \ + if (unlikely(s->ex_value)) { \ + cdest = tcg_temp_new_i64(); \ + tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\ + tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \ + is_imm = false; \ + } else { \ + is_imm = true; \ + } \ + } else { \ + is_imm = false; \ + } \ + imm = is_imm ? get_field(s, ri) : 0; \ +} while (false) + static DisasJumpType op_basi(DisasContext *s, DisasOps *o) { + DisasCompare c; + bool is_imm; + int imm; + pc_to_link_info(o->out, s, s->pc_tmp); - return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2); + + disas_jdest(s, i2, is_imm, imm, o->in2); + disas_jcc(s, &c, 0xf); + return help_branch(s, &c, is_imm, imm, o->in2); } static DisasJumpType op_bc(DisasContext *s, DisasOps *o) { int m1 = get_field(s, m1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; /* BCR with R2 = 0 causes no branching */ if (have_field(s, r2) && get_field(s, r2) == 0) { @@ -1613,6 +1646,7 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) return DISAS_NEXT; } + disas_jdest(s, i2, is_imm, imm, o->in2); disas_jcc(s, &c, m1); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1620,10 +1654,10 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = TCG_COND_NE; c.is_64 = false; @@ -1638,6 +1672,7 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) tcg_gen_extrl_i64_i32(c.u.s32.a, t); tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1668,9 +1703,9 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = TCG_COND_NE; c.is_64 = true; @@ -1681,6 +1716,7 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) c.u.s64.a = regs[r1]; c.u.s64.b = tcg_const_i64(0); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1688,10 +1724,10 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? 
get_field(s, i2) : 0; DisasCompare c; + bool is_imm; TCGv_i64 t; + int imm; c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); c.is_64 = false; @@ -1707,6 +1743,7 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) store_reg32_i64(r1, t); tcg_temp_free_i64(t); + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1714,9 +1751,9 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) { int r1 = get_field(s, r1); int r3 = get_field(s, r3); - bool is_imm = have_field(s, i2); - int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; + bool is_imm; + int imm; c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); c.is_64 = true; @@ -1733,6 +1770,7 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) c.u.s64.a = regs[r1]; c.g1 = true; + disas_jdest(s, i2, is_imm, imm, o->in2); return help_branch(s, &c, is_imm, imm, o->in2); } @@ -1750,10 +1788,9 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) c.u.s64.a = o->in1; c.u.s64.b = o->in2; - is_imm = have_field(s, i4); - if (is_imm) { - imm = get_field(s, i4); - } else { + o->out = NULL; + disas_jdest(s, i4, is_imm, imm, o->out); + if (!is_imm && !o->out) { imm = 0; o->out = get_address(s, 0, get_field(s, b4), get_field(s, d4)); @@ -5962,9 +5999,23 @@ static void in2_a2(DisasContext *s, DisasOps *o) } #define SPEC_in2_a2 0 +static TCGv gen_ri2(DisasContext *s) +{ + TCGv ri2 = NULL; + bool is_imm; + int imm; + + disas_jdest(s, i2, is_imm, imm, ri2); + if (is_imm) { + ri2 = tcg_constant_i64(s->base.pc_next + imm * 2); + } + + return ri2; +} + static void in2_ri2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); + o->in2 = gen_ri2(s); } #define SPEC_in2_ri2 0 @@ -6050,31 +6101,38 @@ static void in2_m2_64a(DisasContext *s, DisasOps *o) #define SPEC_in2_m2_64a 0 #endif +static void in2_mri2_16s(DisasContext *s, DisasOps *o) +{ + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s)); +} +#define SPEC_in2_mri2_16s 0 + static void in2_mri2_16u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_16u 0 static void in2_mri2_32s(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32s 0 static void in2_mri2_32u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32u 0 static void in2_mri2_64(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_64 0 diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 453268392b..827cee25af 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -47,7 +47,7 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, SuperHCPU *cpu = SUPERH_CPU(cs); cpu->env.pc = tb_pc(tb); - cpu->env.flags = tb->flags; + cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; } static void superh_restore_state_to_opc(CPUState *cs, diff --git a/tests/docker/dockerfiles/debian-xtensa-cross.docker 
b/tests/docker/dockerfiles/debian-xtensa-cross.docker index 2f11b3b7bc..aebfabdd6e 100644 --- a/tests/docker/dockerfiles/debian-xtensa-cross.docker +++ b/tests/docker/dockerfiles/debian-xtensa-cross.docker @@ -5,7 +5,7 @@ # using a prebuilt toolchains for Xtensa cores from: # https://github.com/foss-xtensa/toolchain/releases # -FROM docker.io/library/debian:stretch-slim +FROM docker.io/library/debian:11-slim RUN apt-get update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ diff --git a/tests/qtest/fuzz-lsi53c895a-test.c b/tests/qtest/fuzz-lsi53c895a-test.c index 392a7ae7ed..9b007def26 100644 --- a/tests/qtest/fuzz-lsi53c895a-test.c +++ b/tests/qtest/fuzz-lsi53c895a-test.c @@ -8,6 +8,36 @@ #include "qemu/osdep.h" #include "libqtest.h" +/* + * This used to trigger a DMA reentrancy issue + * leading to memory corruption bugs like stack + * overflow or use-after-free + * https://gitlab.com/qemu-project/qemu/-/issues/1563 + */ +static void test_lsi_dma_reentrancy(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -m 512M -nodefaults " + "-blockdev driver=null-co,node-name=null0 " + "-device lsi53c810 -device scsi-cd,drive=null0"); + + qtest_outl(s, 0xcf8, 0x80000804); /* PCI Command Register */ + qtest_outw(s, 0xcfc, 0x7); /* Enables accesses */ + qtest_outl(s, 0xcf8, 0x80000814); /* Memory Bar 1 */ + qtest_outl(s, 0xcfc, 0xff100000); /* Set MMIO Address*/ + qtest_outl(s, 0xcf8, 0x80000818); /* Memory Bar 2 */ + qtest_outl(s, 0xcfc, 0xff000000); /* Set RAM Address*/ + qtest_writel(s, 0xff000000, 0xc0000024); + qtest_writel(s, 0xff000114, 0x00000080); + qtest_writel(s, 0xff00012c, 0xff000000); + qtest_writel(s, 0xff000004, 0xff000114); + qtest_writel(s, 0xff000008, 0xff100014); + qtest_writel(s, 0xff10002f, 0x000000ff); + + qtest_quit(s); +} + /* * This used to trigger a UAF in lsi_do_msgout() * https://gitlab.com/qemu-project/qemu/-/issues/972 @@ -120,5 +150,8 @@ int main(int argc, char **argv) qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req", test_lsi_do_msgout_cancel_req); + qtest_add_func("fuzz/lsi53c895a/lsi_dma_reentrancy", + test_lsi_dma_reentrancy); + return g_test_run(); } diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index 75257f2b29..14bc013181 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -117,6 +117,8 @@ endif %: %.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) +%: %.S + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) else # For softmmu targets we include a different Makefile fragement as the # build options for bare programs are usually pretty different. 
They diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index 81831cafbc..bafd8c2180 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -14,7 +14,7 @@ config-cc.mak: Makefile I386_SRCS=$(notdir $(wildcard $(I386_SRC)/*.c)) ALL_X86_TESTS=$(I386_SRCS:.c=) SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx -X86_64_TESTS:=$(filter test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) +X86_64_TESTS:=$(filter test-i386-adcox test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max @@ -28,6 +28,10 @@ test-i386-bmi2: CFLAGS=-O2 run-test-i386-bmi2: QEMU_OPTS += -cpu max run-plugin-test-i386-bmi2-%: QEMU_OPTS += -cpu max +test-i386-adcox: CFLAGS=-O2 +run-test-i386-adcox: QEMU_OPTS += -cpu max +run-plugin-test-i386-adcox-%: QEMU_OPTS += -cpu max + # # hello-i386 is a barebones app # diff --git a/tests/tcg/i386/test-i386-adcox.c b/tests/tcg/i386/test-i386-adcox.c new file mode 100644 index 0000000000..16169efff8 --- /dev/null +++ b/tests/tcg/i386/test-i386-adcox.c @@ -0,0 +1,75 @@ +/* See if various BMI2 instructions give expected results */ +#include +#include +#include + +#define CC_C 1 +#define CC_O (1 << 11) + +#ifdef __x86_64__ +#define REG uint64_t +#else +#define REG uint32_t +#endif + +void test_adox_adcx(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_operand) +{ + REG flags; + REG out_adcx, out_adox; + + asm("pushf; pop %0" : "=r"(flags)); + flags &= ~(CC_C | CC_O); + flags |= (in_c ? CC_C : 0); + flags |= (in_o ? CC_O : 0); + + out_adcx = adcx_operand; + out_adox = adox_operand; + asm("push %0; popf;" + "adox %3, %2;" + "adcx %3, %1;" + "pushf; pop %0" + : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) + : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + + assert(out_adcx == in_c + adcx_operand - 1); + assert(out_adox == in_o + adox_operand - 1); + assert(!!(flags & CC_C) == (in_c || adcx_operand)); + assert(!!(flags & CC_O) == (in_o || adox_operand)); +} + +void test_adcx_adox(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_operand) +{ + REG flags; + REG out_adcx, out_adox; + + asm("pushf; pop %0" : "=r"(flags)); + flags &= ~(CC_C | CC_O); + flags |= (in_c ? CC_C : 0); + flags |= (in_o ? 
CC_O : 0); + + out_adcx = adcx_operand; + out_adox = adox_operand; + asm("push %0; popf;" + "adcx %3, %1;" + "adox %3, %2;" + "pushf; pop %0" + : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) + : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + + assert(out_adcx == in_c + adcx_operand - 1); + assert(out_adox == in_o + adox_operand - 1); + assert(!!(flags & CC_C) == (in_c || adcx_operand)); + assert(!!(flags & CC_O) == (in_o || adox_operand)); +} + +int main(int argc, char *argv[]) { + /* try all combinations of input CF, input OF, CF from op1+op2, OF from op2+op1 */ + int i; + for (i = 0; i <= 15; i++) { + printf("%d\n", i); + test_adcx_adox(!!(i & 1), !!(i & 2), !!(i & 4), !!(i & 8)); + test_adox_adcx(!!(i & 1), !!(i & 2), !!(i & 4), !!(i & 8)); + } + return 0; +} + diff --git a/tests/tcg/i386/test-i386-bmi2.c b/tests/tcg/i386/test-i386-bmi2.c index 5fadf47510..0244df7987 100644 --- a/tests/tcg/i386/test-i386-bmi2.c +++ b/tests/tcg/i386/test-i386-bmi2.c @@ -3,34 +3,40 @@ #include #include +#ifdef __x86_64 +typedef uint64_t reg_t; +#else +typedef uint32_t reg_t; +#endif + #define insn1q(name, arg0) \ -static inline uint64_t name##q(uint64_t arg0) \ +static inline reg_t name##q(reg_t arg0) \ { \ - uint64_t result64; \ + reg_t result64; \ asm volatile (#name "q %1, %0" : "=r"(result64) : "rm"(arg0)); \ return result64; \ } #define insn1l(name, arg0) \ -static inline uint32_t name##l(uint32_t arg0) \ +static inline reg_t name##l(reg_t arg0) \ { \ - uint32_t result32; \ + reg_t result32; \ asm volatile (#name "l %k1, %k0" : "=r"(result32) : "rm"(arg0)); \ return result32; \ } #define insn2q(name, arg0, c0, arg1, c1) \ -static inline uint64_t name##q(uint64_t arg0, uint64_t arg1) \ +static inline reg_t name##q(reg_t arg0, reg_t arg1) \ { \ - uint64_t result64; \ + reg_t result64; \ asm volatile (#name "q %2, %1, %0" : "=r"(result64) : c0(arg0), c1(arg1)); \ return result64; \ } #define insn2l(name, arg0, c0, arg1, c1) \ -static inline uint32_t name##l(uint32_t arg0, uint32_t arg1) \ +static inline reg_t name##l(reg_t arg0, reg_t arg1) \ { \ - uint32_t result32; \ + reg_t result32; \ asm volatile (#name "l %k2, %k1, %k0" : "=r"(result32) : c0(arg0), c1(arg1)); \ return result32; \ } @@ -65,130 +71,143 @@ insn1l(blsr, src) int main(int argc, char *argv[]) { uint64_t ehlo = 0x202020204f4c4845ull; uint64_t mask = 0xa080800302020001ull; - uint32_t result32; + reg_t result; #ifdef __x86_64 - uint64_t result64; - /* 64 bits */ - result64 = andnq(mask, ehlo); - assert(result64 == 0x002020204d4c4844); + result = andnq(mask, ehlo); + assert(result == 0x002020204d4c4844); - result64 = pextq(ehlo, mask); - assert(result64 == 133); + result = pextq(ehlo, mask); + assert(result == 133); - result64 = pdepq(result64, mask); - assert(result64 == (ehlo & mask)); + result = pdepq(result, mask); + assert(result == (ehlo & mask)); - result64 = pextq(-1ull, mask); - assert(result64 == 511); /* mask has 9 bits set */ + result = pextq(-1ull, mask); + assert(result == 511); /* mask has 9 bits set */ - result64 = pdepq(-1ull, mask); - assert(result64 == mask); + result = pdepq(-1ull, mask); + assert(result == mask); - result64 = bextrq(mask, 0x3f00); - assert(result64 == (mask & ~INT64_MIN)); + result = bextrq(mask, 0x3f00); + assert(result == (mask & ~INT64_MIN)); - result64 = bextrq(mask, 0x1038); - assert(result64 == 0xa0); + result = bextrq(mask, 0x1038); + assert(result == 0xa0); - result64 = bextrq(mask, 0x10f8); - assert(result64 == 0); + result = bextrq(mask, 0x10f8); + assert(result == 0); - 
result64 = blsiq(0x30); - assert(result64 == 0x10); + result = bextrq(0xfedcba9876543210ull, 0x7f00); + assert(result == 0xfedcba9876543210ull); - result64 = blsiq(0x30ull << 32); - assert(result64 == 0x10ull << 32); + result = blsiq(0x30); + assert(result == 0x10); - result64 = blsmskq(0x30); - assert(result64 == 0x1f); + result = blsiq(0x30ull << 32); + assert(result == 0x10ull << 32); - result64 = blsrq(0x30); - assert(result64 == 0x20); + result = blsmskq(0x30); + assert(result == 0x1f); - result64 = blsrq(0x30ull << 32); - assert(result64 == 0x20ull << 32); + result = blsrq(0x30); + assert(result == 0x20); - result64 = bzhiq(mask, 0x3f); - assert(result64 == (mask & ~INT64_MIN)); + result = blsrq(0x30ull << 32); + assert(result == 0x20ull << 32); - result64 = bzhiq(mask, 0x1f); - assert(result64 == (mask & ~(-1 << 30))); + result = bzhiq(mask, 0x3f); + assert(result == (mask & ~INT64_MIN)); - result64 = rorxq(0x2132435465768798, 8); - assert(result64 == 0x9821324354657687); + result = bzhiq(mask, 0x1f); + assert(result == (mask & ~(-1 << 30))); - result64 = sarxq(0xffeeddccbbaa9988, 8); - assert(result64 == 0xffffeeddccbbaa99); + result = bzhiq(mask, 0x40); + assert(result == mask); - result64 = sarxq(0x77eeddccbbaa9988, 8 | 64); - assert(result64 == 0x0077eeddccbbaa99); + result = rorxq(0x2132435465768798, 8); + assert(result == 0x9821324354657687); - result64 = shrxq(0xffeeddccbbaa9988, 8); - assert(result64 == 0x00ffeeddccbbaa99); + result = sarxq(0xffeeddccbbaa9988, 8); + assert(result == 0xffffeeddccbbaa99); - result64 = shrxq(0x77eeddccbbaa9988, 8 | 192); - assert(result64 == 0x0077eeddccbbaa99); + result = sarxq(0x77eeddccbbaa9988, 8 | 64); + assert(result == 0x0077eeddccbbaa99); - result64 = shlxq(0xffeeddccbbaa9988, 8); - assert(result64 == 0xeeddccbbaa998800); + result = shrxq(0xffeeddccbbaa9988, 8); + assert(result == 0x00ffeeddccbbaa99); + + result = shrxq(0x77eeddccbbaa9988, 8 | 192); + assert(result == 0x0077eeddccbbaa99); + + result = shlxq(0xffeeddccbbaa9988, 8); + assert(result == 0xeeddccbbaa998800); #endif /* 32 bits */ - result32 = andnl(mask, ehlo); - assert(result32 == 0x04d4c4844); + result = andnl(mask, ehlo); + assert(result == 0x04d4c4844); - result32 = pextl((uint32_t) ehlo, mask); - assert(result32 == 5); + result = pextl((uint32_t) ehlo, mask); + assert(result == 5); - result32 = pdepl(result32, mask); - assert(result32 == (uint32_t)(ehlo & mask)); + result = pdepl(result, mask); + assert(result == (uint32_t)(ehlo & mask)); - result32 = pextl(-1u, mask); - assert(result32 == 7); /* mask has 3 bits set */ + result = pextl(-1u, mask); + assert(result == 7); /* mask has 3 bits set */ - result32 = pdepl(-1u, mask); - assert(result32 == (uint32_t)mask); + result = pdepl(-1u, mask); + assert(result == (uint32_t)mask); - result32 = bextrl(mask, 0x1f00); - assert(result32 == (mask & ~INT32_MIN)); + result = bextrl(mask, 0x1f00); + assert(result == (mask & ~INT32_MIN)); - result32 = bextrl(ehlo, 0x1018); - assert(result32 == 0x4f); + result = bextrl(ehlo, 0x1018); + assert(result == 0x4f); - result32 = bextrl(mask, 0x1038); - assert(result32 == 0); + result = bextrl(mask, 0x1038); + assert(result == 0); - result32 = blsil(0xffff); - assert(result32 == 1); + result = bextrl((reg_t)0x8f635a775ad3b9b4ull, 0x3018); + assert(result == 0x5a); - result32 = blsmskl(0x300); - assert(result32 == 0x1ff); + result = bextrl((reg_t)0xfedcba9876543210ull, 0x7f00); + assert(result == 0x76543210u); - result32 = blsrl(0xffc); - assert(result32 == 0xff8); + result = bextrl(-1, 0); + 
assert(result == 0); - result32 = bzhil(mask, 0xf); - assert(result32 == 1); + result = blsil(0xffff); + assert(result == 1); - result32 = rorxl(0x65768798, 8); - assert(result32 == 0x98657687); + result = blsmskl(0x300); + assert(result == 0x1ff); - result32 = sarxl(0xffeeddcc, 8); - assert(result32 == 0xffffeedd); + result = blsrl(0xffc); + assert(result == 0xff8); - result32 = sarxl(0x77eeddcc, 8 | 32); - assert(result32 == 0x0077eedd); + result = bzhil(mask, 0xf); + assert(result == 1); - result32 = shrxl(0xffeeddcc, 8); - assert(result32 == 0x00ffeedd); + result = rorxl(0x65768798, 8); + assert(result == 0x98657687); - result32 = shrxl(0x77eeddcc, 8 | 128); - assert(result32 == 0x0077eedd); + result = sarxl(0xffeeddcc, 8); + assert(result == 0xffffeedd); - result32 = shlxl(0xffeeddcc, 8); - assert(result32 == 0xeeddcc00); + result = sarxl(0x77eeddcc, 8 | 32); + assert(result == 0x0077eedd); + + result = shrxl(0xffeeddcc, 8); + assert(result == 0x00ffeedd); + + result = shrxl(0x77eeddcc, 8 | 128); + assert(result == 0x0077eedd); + + result = shlxl(0xffeeddcc, 8); + assert(result == 0xeeddcc00); return 0; } diff --git a/tests/tcg/multiarch/linux/linux-test.c b/tests/tcg/multiarch/linux/linux-test.c index 5a2a4f2258..64f57cb287 100644 --- a/tests/tcg/multiarch/linux/linux-test.c +++ b/tests/tcg/multiarch/linux/linux-test.c @@ -354,13 +354,17 @@ static void test_pipe(void) if (FD_ISSET(fds[0], &rfds)) { chk_error(read(fds[0], &ch, 1)); rcount++; - if (rcount >= WCOUNT_MAX) + if (rcount >= WCOUNT_MAX) { break; + } } if (FD_ISSET(fds[1], &wfds)) { ch = 'a'; chk_error(write(fds[1], &ch, 1)); wcount++; + if (wcount >= WCOUNT_MAX) { + break; + } } } } diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target index b5b89dfb0e..cc3ed65ffd 100644 --- a/tests/tcg/riscv64/Makefile.target +++ b/tests/tcg/riscv64/Makefile.target @@ -4,3 +4,9 @@ VPATH += $(SRC_PATH)/tests/tcg/riscv64 TESTS += test-div TESTS += noexec + +# Disable compressed instructions for test-noc +TESTS += test-noc +test-noc: LDFLAGS = -nostdlib -static +run-test-noc: QEMU_OPTS += -cpu rv64,c=false +run-plugin-test-noc-%: QEMU_OPTS += -cpu rv64,c=false diff --git a/tests/tcg/riscv64/test-noc.S b/tests/tcg/riscv64/test-noc.S new file mode 100644 index 0000000000..e29d60c8b3 --- /dev/null +++ b/tests/tcg/riscv64/test-noc.S @@ -0,0 +1,32 @@ +#include + + .text + .globl _start +_start: + .option norvc + li a0, 4 /* SIGILL */ + la a1, sa + li a2, 0 + li a3, 8 + li a7, __NR_rt_sigaction + scall + + .option rvc + li a0, 1 + j exit + .option norvc + +pass: + li a0, 0 +exit: + li a7, __NR_exit + scall + + .data + /* struct kernel_sigaction sa = { .sa_handler = pass }; */ + .type sa, @object + .size sa, 32 +sa: + .dword pass + .zero 24 + diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index 07fcc6d0ce..cb90d4183d 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -26,6 +26,8 @@ TESTS+=branch-relative-long TESTS+=noexec Z13_TESTS=vistr +Z13_TESTS+=lcbb +Z13_TESTS+=locfhr $(Z13_TESTS): CFLAGS+=-march=z13 -O2 TESTS+=$(Z13_TESTS) @@ -54,7 +56,16 @@ run-gdbstub-signals-s390x: signals-s390x --bin $< --test $(S390X_SRC)/gdbstub/test-signals-s390x.py, \ mixing signals and debugging) -EXTRA_RUNS += run-gdbstub-signals-s390x +hello-s390x-asm: CFLAGS+=-nostdlib + +run-gdbstub-svc: hello-s390x-asm + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(S390X_SRC)/gdbstub/test-svc.py, \ + 
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index 07fcc6d0ce..cb90d4183d 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -26,6 +26,8 @@ TESTS+=branch-relative-long
 TESTS+=noexec
 
 Z13_TESTS=vistr
+Z13_TESTS+=lcbb
+Z13_TESTS+=locfhr
 $(Z13_TESTS): CFLAGS+=-march=z13 -O2
 TESTS+=$(Z13_TESTS)
 
@@ -54,7 +56,16 @@ run-gdbstub-signals-s390x: signals-s390x
 		--bin $< --test $(S390X_SRC)/gdbstub/test-signals-s390x.py, \
 		mixing signals and debugging)
 
-EXTRA_RUNS += run-gdbstub-signals-s390x
+hello-s390x-asm: CFLAGS+=-nostdlib
+
+run-gdbstub-svc: hello-s390x-asm
+	$(call run-test, $@, $(GDB_SCRIPT) \
+		--gdb $(HAVE_GDB_BIN) \
+		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
+		--bin $< --test $(S390X_SRC)/gdbstub/test-svc.py, \
+		single-stepping svc)
+
+EXTRA_RUNS += run-gdbstub-signals-s390x run-gdbstub-svc
 endif
 
 # MVX versions of sha512
diff --git a/tests/tcg/s390x/gdbstub/test-svc.py b/tests/tcg/s390x/gdbstub/test-svc.py
new file mode 100644
index 0000000000..7851ca7284
--- /dev/null
+++ b/tests/tcg/s390x/gdbstub/test-svc.py
@@ -0,0 +1,64 @@
+"""Test single-stepping SVC.
+
+This runs as a sourced script (via -x, via run-test.py)."""
+from __future__ import print_function
+import gdb
+import sys
+
+
+n_failures = 0
+
+
+def report(cond, msg):
+    """Report success/fail of a test"""
+    if cond:
+        print("PASS: {}".format(msg))
+    else:
+        print("FAIL: {}".format(msg))
+        global n_failures
+        n_failures += 1
+
+
+def run_test():
+    """Run through the tests one by one"""
+    report("lghi\t" in gdb.execute("x/i $pc", False, True), "insn #1")
+    gdb.execute("si")
+    report("larl\t" in gdb.execute("x/i $pc", False, True), "insn #2")
+    gdb.execute("si")
+    report("lghi\t" in gdb.execute("x/i $pc", False, True), "insn #3")
+    gdb.execute("si")
+    report("svc\t" in gdb.execute("x/i $pc", False, True), "insn #4")
+    gdb.execute("si")
+    report("xgr\t" in gdb.execute("x/i $pc", False, True), "insn #5")
+    gdb.execute("si")
+    report("svc\t" in gdb.execute("x/i $pc", False, True), "insn #6")
+    gdb.execute("si")
+
+
+def main():
+    """Prepare the environment and run through the tests"""
+    try:
+        inferior = gdb.selected_inferior()
+        print("ATTACHED: {}".format(inferior.architecture().name()))
+    except (gdb.error, AttributeError):
+        print("SKIPPING (not connected)")
+        exit(0)
+
+    if gdb.parse_and_eval('$pc') == 0:
+        print("SKIP: PC not set")
+        exit(0)
+
+    try:
+        # These are not very useful in scripts
+        gdb.execute("set pagination off")
+        gdb.execute("set confirm off")
+
+        # Run the actual tests
+        run_test()
+    except gdb.error:
+        report(False, "GDB Exception: {}".format(sys.exc_info()[0]))
+    print("All tests complete: %d failures" % n_failures)
+    exit(n_failures)
+
+
+main()
diff --git a/tests/tcg/s390x/hello-s390x-asm.S b/tests/tcg/s390x/hello-s390x-asm.S
new file mode 100644
index 0000000000..2e9faa1604
--- /dev/null
+++ b/tests/tcg/s390x/hello-s390x-asm.S
@@ -0,0 +1,20 @@
+/*
+ * Hello, World! in assembly.
+ */
+
+.globl _start
+_start:
+
+/* puts("Hello, World!"); */
+lghi %r2,1
+larl %r3,foo
+lghi %r4,foo_end-foo
+svc 4
+
+/* exit(0); */
+xgr %r2,%r2
+svc 1
+
+.align 2
+foo: .asciz "Hello, World!\n"
+foo_end:
diff --git a/tests/tcg/s390x/lcbb.c b/tests/tcg/s390x/lcbb.c
new file mode 100644
index 0000000000..8d368e0998
--- /dev/null
+++ b/tests/tcg/s390x/lcbb.c
@@ -0,0 +1,51 @@
+/*
+ * Test the LCBB instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+static inline __attribute__((__always_inline__)) void
+lcbb(long *r1, void *dxb2, int m3, int *cc)
+{
+    asm("lcbb %[r1],%[dxb2],%[m3]\n"
+        "ipm %[cc]"
+        : [r1] "+r" (*r1), [cc] "=r" (*cc)
+        : [dxb2] "R" (*(char *)dxb2), [m3] "i" (m3)
+        : "cc");
+    *cc = (*cc >> 28) & 3;
+}
+
+static char buf[0x1000] __attribute__((aligned(0x1000)));
+
+static inline __attribute__((__always_inline__)) void
+test_lcbb(void *p, int m3, int exp_r1, int exp_cc)
+{
+    long r1 = 0xfedcba9876543210;
+    int cc;
+
+    lcbb(&r1, p, m3, &cc);
+    assert(r1 == (0xfedcba9800000000 | exp_r1));
+    assert(cc == exp_cc);
+}
+
+int main(void)
+{
+    test_lcbb(&buf[0], 0, 16, 0);
+    test_lcbb(&buf[63], 0, 1, 3);
+    test_lcbb(&buf[0], 1, 16, 0);
+    test_lcbb(&buf[127], 1, 1, 3);
+    test_lcbb(&buf[0], 2, 16, 0);
+    test_lcbb(&buf[255], 2, 1, 3);
+    test_lcbb(&buf[0], 3, 16, 0);
+    test_lcbb(&buf[511], 3, 1, 3);
+    test_lcbb(&buf[0], 4, 16, 0);
+    test_lcbb(&buf[1023], 4, 1, 3);
+    test_lcbb(&buf[0], 5, 16, 0);
+    test_lcbb(&buf[2047], 5, 1, 3);
+    test_lcbb(&buf[0], 6, 16, 0);
+    test_lcbb(&buf[4095], 6, 1, 3);
+
+    return EXIT_SUCCESS;
+}
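Aside: lcbb.c pins down the LCBB behaviour it expects: m3 selects a block size
of 64 << m3 bytes, the low 32 bits of r1 receive the number of bytes from the
address up to that boundary capped at 16 (the high 32 bits are preserved, hence
the 0xfedcba98 check), and the condition code is 0 when all 16 bytes fit and 3
otherwise. A small C model of that rule (lcbb_model is a hypothetical name,
assuming exactly the semantics the test asserts):

#include <assert.h>
#include <stdint.h>

/* Hypothetical model: byte count from addr to the next (64 << m3) boundary,
 * capped at 16; cc is 0 if the full 16 bytes fit, 3 otherwise. */
static void lcbb_model(uint64_t addr, int m3, uint32_t *count, int *cc)
{
    uint64_t block = 64ull << m3;               /* 64, 128, ..., 4096 */
    uint64_t to_boundary = block - (addr & (block - 1));

    *count = to_boundary < 16 ? (uint32_t)to_boundary : 16;
    *cc = (*count == 16) ? 0 : 3;
}

int main(void)
{
    uint32_t count;
    int cc;

    lcbb_model(0x1000, 0, &count, &cc);         /* like &buf[0], 64-byte block */
    assert(count == 16 && cc == 0);
    lcbb_model(0x1000 + 63, 0, &count, &cc);    /* like &buf[63]: 1 byte left */
    assert(count == 1 && cc == 3);
    lcbb_model(0x1000 + 4095, 6, &count, &cc);  /* like &buf[4095], 4K block */
    assert(count == 1 && cc == 3);
    return 0;
}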
diff --git a/tests/tcg/s390x/locfhr.c b/tests/tcg/s390x/locfhr.c
new file mode 100644
index 0000000000..ab9ff6e449
--- /dev/null
+++ b/tests/tcg/s390x/locfhr.c
@@ -0,0 +1,29 @@
+/*
+ * Test the LOCFHR instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+static inline __attribute__((__always_inline__)) long
+locfhr(long r1, long r2, int m3, int cc)
+{
+    cc <<= 28;
+    asm("spm %[cc]\n"
+        "locfhr %[r1],%[r2],%[m3]\n"
+        : [r1] "+r" (r1)
+        : [cc] "r" (cc), [r2] "r" (r2), [m3] "i" (m3)
+        : "cc");
+    return r1;
+}
+
+int main(void)
+{
+    assert(locfhr(0x1111111122222222, 0x3333333344444444, 8, 0) ==
+           0x3333333322222222);
+    assert(locfhr(0x5555555566666666, 0x7777777788888888, 11, 1) ==
+           0x5555555566666666);
+
+    return EXIT_SUCCESS;
+}
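Aside: locfhr.c likewise spells out what it expects from LOCFHR: m3 is a 4-bit
condition mask (0x8 selects cc 0, 0x1 selects cc 3); when the current cc is
selected, the high word of r2 replaces the high word of r1 and the low word is
left alone, otherwise r1 is unchanged. A C sketch under those assumptions
(locfhr_model is a hypothetical name, not QEMU code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical model of "load high on condition" (register form). */
static uint64_t locfhr_model(uint64_t r1, uint64_t r2, unsigned m3, unsigned cc)
{
    if ((m3 >> (3 - cc)) & 1) {                 /* mask bit for this cc set? */
        r1 = (r2 & 0xffffffff00000000ull) | (r1 & 0xffffffffull);
    }
    return r1;
}

int main(void)
{
    /* same operands as the assertions in the test */
    assert(locfhr_model(0x1111111122222222ull, 0x3333333344444444ull, 8, 0) ==
           0x3333333322222222ull);
    assert(locfhr_model(0x5555555566666666ull, 0x7777777788888888ull, 11, 1) ==
           0x5555555566666666ull);
    return 0;
}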
diff --git a/ui/console.c b/ui/console.c
index 3c0d9b061a..646202214a 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -307,7 +307,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp)
     png_struct *png_ptr;
     png_info *info_ptr;
     g_autoptr(pixman_image_t) linebuf =
-        qemu_pixman_linebuf_create(PIXMAN_a8r8g8b8, width);
+        qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, width);
     uint8_t *buf = (uint8_t *)pixman_image_get_data(linebuf);
     FILE *f = fdopen(fd, "wb");
     int y;
@@ -337,7 +337,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp)
     png_init_io(png_ptr, f);
 
     png_set_IHDR(png_ptr, info_ptr, width, height, 8,
-                 PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
+                 PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
                  PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
 
     png_write_info(png_ptr, info_ptr);
diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c
index e84431790c..e99e3b0d8c 100644
--- a/ui/gtk-egl.c
+++ b/ui/gtk-egl.c
@@ -88,8 +88,8 @@ void gd_egl_draw(VirtualConsole *vc)
 #endif
         gd_egl_scanout_flush(&vc->gfx.dcl, 0, 0, vc->gfx.w, vc->gfx.h);
 
-        vc->gfx.scale_x = (double)ww / vc->gfx.w;
-        vc->gfx.scale_y = (double)wh / vc->gfx.h;
+        vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds);
+        vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds);
 
         glFlush();
 #ifdef CONFIG_GBM
@@ -256,8 +256,9 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
     }
 
     gd_egl_scanout_texture(dcl, dmabuf->texture,
-                           false, dmabuf->width, dmabuf->height,
-                           0, 0, dmabuf->width, dmabuf->height);
+                           dmabuf->y0_top, dmabuf->width, dmabuf->height,
+                           dmabuf->x, dmabuf->y, dmabuf->scanout_width,
+                           dmabuf->scanout_height);
 
     if (dmabuf->allow_fences) {
         vc->gfx.guest_fb.dmabuf = dmabuf;
diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c
index 7696df1f6b..1605818bd1 100644
--- a/ui/gtk-gl-area.c
+++ b/ui/gtk-gl-area.c
@@ -298,8 +298,9 @@ void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl,
     }
 
     gd_gl_area_scanout_texture(dcl, dmabuf->texture,
-                               false, dmabuf->width, dmabuf->height,
-                               0, 0, dmabuf->width, dmabuf->height);
+                               dmabuf->y0_top, dmabuf->width, dmabuf->height,
+                               dmabuf->x, dmabuf->y, dmabuf->scanout_width,
+                               dmabuf->scanout_height);
 
     if (dmabuf->allow_fences) {
         vc->gfx.guest_fb.dmabuf = dmabuf;
diff --git a/ui/gtk.c b/ui/gtk.c
index 4817623c8f..e681e8c319 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -868,7 +868,6 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
 {
     VirtualConsole *vc = opaque;
     GtkDisplayState *s = vc->s;
-    GdkWindow *window;
     int x, y;
     int mx, my;
     int fbh, fbw;
@@ -881,10 +880,9 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
     fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
     fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
 
-    window = gtk_widget_get_window(vc->gfx.drawing_area);
-    ww = gdk_window_get_width(window);
-    wh = gdk_window_get_height(window);
-    ws = gdk_window_get_scale_factor(window);
+    ww = gtk_widget_get_allocated_width(widget);
+    wh = gtk_widget_get_allocated_height(widget);
+    ws = gtk_widget_get_scale_factor(widget);
 
     mx = my = 0;
     if (ww > fbw) {
@@ -1783,7 +1781,9 @@ static void gd_vc_chr_accept_input(Chardev *chr)
     VCChardev *vcd = VC_CHARDEV(chr);
     VirtualConsole *vc = vcd->console;
 
-    gd_vc_send_chars(vc);
+    if (vc) {
+        gd_vc_send_chars(vc);
+    }
 }
 
 static void gd_vc_chr_set_echo(Chardev *chr, bool echo)
diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c
index 39cab8cde7..bbfa70eac3 100644
--- a/ui/sdl2-gl.c
+++ b/ui/sdl2-gl.c
@@ -67,6 +67,10 @@ void sdl2_gl_update(DisplayChangeListener *dcl,
 
     assert(scon->opengl);
 
+    if (!scon->real_window) {
+        return;
+    }
+
     SDL_GL_MakeCurrent(scon->real_window, scon->winctx);
     surface_gl_update_texture(scon->gls, scon->surface, x, y, w, h);
     scon->updates++;
diff --git a/ui/sdl2.c b/ui/sdl2.c
index 8cb77416af..d630459b78 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -849,7 +849,14 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o)
 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR /* only available since SDL 2.0.8 */
     SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
 #endif
+#ifndef CONFIG_WIN32
+    /* QEMU uses its own low level keyboard hook procecure on Windows */
     SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1");
+#endif
+#ifdef SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED
+    SDL_SetHint(SDL_HINT_ALLOW_ALT_TAB_WHILE_GRABBED, "0");
+#endif
+
     SDL_SetHint(SDL_HINT_WINDOWS_NO_CLOSE_ON_ALT_F4, "1");
     memset(&info, 0, sizeof(info));
     SDL_VERSION(&info.version);
diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c
index 886f9bf611..fcca7ec632 100644
--- a/ui/vnc-jobs.c
+++ b/ui/vnc-jobs.c
@@ -250,12 +250,13 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
         /* Here job can only be NULL if queue->exit is true */
        job = QTAILQ_FIRST(&queue->jobs);
         vnc_unlock_queue(queue);
-        assert(job->vs->magic == VNC_MAGIC);
 
         if (queue->exit) {
             return -1;
         }
 
+        assert(job->vs->magic == VNC_MAGIC);
+
         vnc_lock_output(job->vs);
         if (job->vs->ioc == NULL || job->vs->abort == true) {
             vnc_unlock_output(job->vs);
diff --git a/ui/vnc.c b/ui/vnc.c
index 88f55cbf3c..1856d57380 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -3765,7 +3765,7 @@ static int vnc_display_get_address(const char *addrstr,
         addr->type = SOCKET_ADDRESS_TYPE_INET;
         inet = &addr->u.inet;
 
-        if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') {
+        if (hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']') {
             inet->host = g_strndup(addrstr + 1, hostlen - 2);
         } else {
             inet->host = g_strndup(addrstr, hostlen);
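Aside: the ui/vnc.c hunk above guards against an empty host part. With a listen
string such as ":5900", hostlen is 0, so the old condition read
addrstr[hostlen - 1], one byte before the buffer. A trimmed-down sketch of the
check (host_is_bracketed is a hypothetical reduction of the parse, not the
actual vnc.c code):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical helper: does the host part look like a bracketed IPv6 literal? */
static int host_is_bracketed(const char *addrstr)
{
    const char *port = strrchr(addrstr, ':');
    size_t hostlen = port ? (size_t)(port - addrstr) : strlen(addrstr);

    /* the added "hostlen &&" term keeps addrstr[hostlen - 1] in bounds */
    return hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']';
}

int main(void)
{
    assert(host_is_bracketed("[::1]:5900"));
    assert(!host_is_bracketed("127.0.0.1:5900"));
    assert(!host_is_bracketed(":5900"));        /* empty host, no OOB read */
    return 0;
}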
diff --git a/util/async.c b/util/async.c
index 63434ddae4..f449c3444e 100644
--- a/util/async.c
+++ b/util/async.c
@@ -158,7 +158,21 @@ int aio_bh_poll(AioContext *ctx)
     int ret = 0;
 
     QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
+
+    /*
+     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
+     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
+     * list is emptied before this function returns.
+     */
+#if !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Wdangling-pointer="
+#endif
     QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
+#if !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
     while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
         QEMUBH *bh;
diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c
index e11a8a022e..1683aa1105 100644
--- a/util/fdmon-epoll.c
+++ b/util/fdmon-epoll.c
@@ -127,6 +127,8 @@ static bool fdmon_epoll_try_enable(AioContext *ctx)
 
 bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
 {
+    bool ok;
+
     if (ctx->epollfd < 0) {
         return false;
     }
@@ -136,14 +138,23 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
         return false;
     }
 
-    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
-        if (fdmon_epoll_try_enable(ctx)) {
-            return true;
-        } else {
-            fdmon_epoll_disable(ctx);
-        }
+    if (npfd < EPOLL_ENABLE_THRESHOLD) {
+        return false;
     }
-    return false;
+
+    /* The list must not change while we add fds to epoll */
+    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
+        return false;
+    }
+
+    ok = fdmon_epoll_try_enable(ctx);
+
+    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
+
+    if (!ok) {
+        fdmon_epoll_disable(ctx);
+    }
+    return ok;
 }
 
 void fdmon_epoll_setup(AioContext *ctx)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 0d1520caac..4670867e1f 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -106,15 +106,17 @@ struct QEMUVFIOState {
  */
 static char *sysfs_find_group_file(const char *device, Error **errp)
 {
+    g_autoptr(GError) gerr = NULL;
     char *sysfs_link;
     char *sysfs_group;
     char *p;
     char *path = NULL;
 
     sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", device);
-    sysfs_group = g_malloc0(PATH_MAX);
-    if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) {
-        error_setg_errno(errp, errno, "Failed to find iommu group sysfs path");
+    sysfs_group = g_file_read_link(sysfs_link, &gerr);
+    if (gerr) {
+        error_setg(errp, "Failed to find iommu group sysfs path: %s",
+                   gerr->message);
         goto out;
     }
     p = strrchr(sysfs_group, '/');