util: Make qemu_prealloc_mem() optionally consume a ThreadContext

... and implement it under POSIX. When a ThreadContext is provided,
create new threads via the context such that these new threads obtain a
properly configured CPU affinity.

Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Message-Id: <20221014134720.168738-6-david@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Author: David Hildenbrand <david@redhat.com>
Date:   2022-10-14 15:47:18 +02:00
Commit: e04a34e55c
Parent: 10218ae6d0

5 changed files with 22 additions and 11 deletions
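
As a quick illustration of the resulting call contract, here is a minimal caller sketch. It is not part of this commit: the wrapper name and its parameters are assumptions; only qemu_prealloc_mem() and its new tc argument come from this patch.

#include "qemu/osdep.h"
#include "qapi/error.h"

/*
 * Hypothetical wrapper, for illustration only: preallocate a mapped
 * area, optionally pinning the preallocation threads via a
 * ThreadContext. Passing tc == NULL keeps the previous behaviour and
 * creates the threads without any affinity configuration.
 */
static void prealloc_with_optional_context(int fd, char *area, size_t sz,
                                           int max_threads,
                                           ThreadContext *tc, Error **errp)
{
    Error *local_err = NULL;

    qemu_prealloc_mem(fd, area, sz, max_threads, tc, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
}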

backends/hostmem.c

@@ -232,7 +232,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value,
         void *ptr = memory_region_get_ram_ptr(&backend->mr);
         uint64_t sz = memory_region_size(&backend->mr);
 
-        qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, &local_err);
+        qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, NULL,
+                          &local_err);
         if (local_err) {
             error_propagate(errp, local_err);
             return;
@@ -384,7 +385,7 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
          */
         if (backend->prealloc) {
             qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
-                              backend->prealloc_threads, &local_err);
+                              backend->prealloc_threads, NULL, &local_err);
             if (local_err) {
                 goto out;
             }

hw/virtio/virtio-mem.c

@@ -467,7 +467,7 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
             int fd = memory_region_get_fd(&vmem->memdev->mr);
             Error *local_err = NULL;
 
-            qemu_prealloc_mem(fd, area, size, 1, &local_err);
+            qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err);
             if (local_err) {
                 static bool warned;

include/qemu/osdep.h

@@ -568,6 +568,8 @@ unsigned long qemu_getauxval(unsigned long type);
 
 void qemu_set_tty_echo(int fd, bool echo);
 
+typedef struct ThreadContext ThreadContext;
+
 /**
  * qemu_prealloc_mem:
  * @fd: the fd mapped into the area, -1 for anonymous memory
@@ -582,7 +584,7 @@ void qemu_set_tty_echo(int fd, bool echo);
  * after allocating file blocks for mapped files.
  */
 void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
-                       Error **errp);
+                       ThreadContext *tc, Error **errp);
 
 /**
  * qemu_get_pid_name:

util/oslib-posix.c

@@ -419,7 +419,8 @@ static inline int get_memset_num_threads(size_t hpagesize, size_t numpages,
 }
 
 static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
-                           int max_threads, bool use_madv_populate_write)
+                           int max_threads, ThreadContext *tc,
+                           bool use_madv_populate_write)
 {
     static gsize initialized = 0;
     MemsetContext context = {
@@ -458,9 +459,16 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
         context.threads[i].numpages = numpages_per_thread + (i < leftover);
         context.threads[i].hpagesize = hpagesize;
         context.threads[i].context = &context;
-        qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
-                           touch_fn, &context.threads[i],
-                           QEMU_THREAD_JOINABLE);
+        if (tc) {
+            thread_context_create_thread(tc, &context.threads[i].pgthread,
+                                         "touch_pages",
+                                         touch_fn, &context.threads[i],
+                                         QEMU_THREAD_JOINABLE);
+        } else {
+            qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+                               touch_fn, &context.threads[i],
+                               QEMU_THREAD_JOINABLE);
+        }
         addr += context.threads[i].numpages * hpagesize;
     }
 
@@ -496,7 +504,7 @@ static bool madv_populate_write_possible(char *area, size_t pagesize)
 }
 
 void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
-                       Error **errp)
+                       ThreadContext *tc, Error **errp)
 {
     static gsize initialized;
     int ret;
@@ -537,7 +545,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
     }
 
     /* touch pages simultaneously */
-    ret = touch_all_pages(area, hpagesize, numpages, max_threads,
+    ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc,
                           use_madv_populate_write);
     if (ret) {
         error_setg_errno(errp, -ret,

util/oslib-win32.c

@@ -269,7 +269,7 @@ int getpagesize(void)
 }
 
 void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
-                       Error **errp)
+                       ThreadContext *tc, Error **errp)
 {
     int i;
     size_t pagesize = qemu_real_host_page_size();