author     Stefan Hajnoczi <stefanha@redhat.com>   2020-09-23 11:56:46 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>   2020-09-23 16:07:44 +0100
commit     d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch)
tree       bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /migration
parent     ed7db34b5aedba4487fd949b2e545eef954f093e (diff)
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a pointer to a C11 atomic
type as their address argument. QEMU uses plain types (int, etc.), and this
causes a compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
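As an illustration of the clash (this reproducer is not part of the patch;
the flag variables are made up for the example), a plain-typed object cannot
be passed to the C11 generic functions once <stdatomic.h> is in scope:

    #include <stdatomic.h>

    static _Atomic unsigned int c11_flags;   /* what atomic_fetch_or() expects */
    static unsigned int plain_flags;         /* plain type, as QEMU declares its fields */

    unsigned int demo(void)
    {
        unsigned int old = atomic_fetch_or(&c11_flags, 1u);   /* compiles */
        /* atomic_fetch_or(&plain_flags, 1u); would fail with the clang error
         * above: the address argument must point to an _Atomic type. */
        return old;
    }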
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
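To show why the prefixed names can coexist with <stdatomic.h>, here is a
simplified sketch (not the literal macros in include/qemu/atomic.h, which add
extra type and size checks): the q-prefixed wrappers expand to GCC/clang
__atomic builtins, which accept plain pointers, and their names stay outside
the atomic_* namespace that <stdatomic.h> claims.

    /* Simplified sketch only; see include/qemu/atomic.h for the real definitions. */
    #define qatomic_read(ptr)       __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define qatomic_set(ptr, val)   __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
    #define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)

    static unsigned int count;               /* plain type, no _Atomic needed */

    unsigned int bump(void)
    {
        return qatomic_fetch_inc(&count);    /* fine even if <stdatomic.h> is included */
    }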
This patch was generated using:
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
      sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
      sed -i "s%\<$identifier\>%q$identifier%g" \
          $(git grep -I -l "\<$identifier\>")
  done
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r--   migration/colo-failover.c |  4
-rw-r--r--   migration/migration.c     |  8
-rw-r--r--   migration/multifd.c       | 18
-rw-r--r--   migration/postcopy-ram.c  | 34
-rw-r--r--   migration/rdma.c          | 34

5 files changed, 49 insertions, 49 deletions
diff --git a/migration/colo-failover.c b/migration/colo-failover.c
index b717edc8e2..42453481c4 100644
--- a/migration/colo-failover.c
+++ b/migration/colo-failover.c
@@ -63,7 +63,7 @@ FailoverStatus failover_set_state(FailoverStatus old_state,
 {
     FailoverStatus old;
 
-    old = atomic_cmpxchg(&failover_state, old_state, new_state);
+    old = qatomic_cmpxchg(&failover_state, old_state, new_state);
     if (old == old_state) {
         trace_colo_failover_set_state(FailoverStatus_str(new_state));
     }
@@ -72,7 +72,7 @@ FailoverStatus failover_set_state(FailoverStatus old_state,
 
 FailoverStatus failover_get_state(void)
 {
-    return atomic_read(&failover_state);
+    return qatomic_read(&failover_state);
 }
 
 void qmp_x_colo_lost_heartbeat(Error **errp)
diff --git a/migration/migration.c b/migration/migration.c
index 58a5452471..d9d1e0b190 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1595,7 +1595,7 @@ void qmp_migrate_start_postcopy(Error **errp)
      * we don't error if migration has finished since that would be racy
      * with issuing this command.
      */
-    atomic_set(&s->start_postcopy, true);
+    qatomic_set(&s->start_postcopy, true);
 }
 
 /* shared migration helpers */
@@ -1603,7 +1603,7 @@ void qmp_migrate_start_postcopy(Error **errp)
 void migrate_set_state(int *state, int old_state, int new_state)
 {
     assert(new_state < MIGRATION_STATUS__MAX);
-    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
+    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
         trace_migrate_set_state(MigrationStatus_str(new_state));
         migrate_generate_event(new_state);
     }
@@ -1954,7 +1954,7 @@ void qmp_migrate_recover(const char *uri, Error **errp)
         return;
     }
 
-    if (atomic_cmpxchg(&mis->postcopy_recover_triggered,
+    if (qatomic_cmpxchg(&mis->postcopy_recover_triggered,
                        false, true) == true) {
         error_setg(errp, "Migrate recovery is triggered already");
         return;
@@ -3329,7 +3329,7 @@ static MigIterateState migration_iteration_run(MigrationState *s)
     if (pending_size && pending_size >= s->threshold_size) {
         /* Still a significant amount to transfer */
         if (!in_postcopy && pend_pre <= s->threshold_size &&
-            atomic_read(&s->start_postcopy)) {
+            qatomic_read(&s->start_postcopy)) {
             if (postcopy_start(s)) {
                 error_report("%s: postcopy failed to start", __func__);
             }
diff --git a/migration/multifd.c b/migration/multifd.c
index ac84a61797..fd57378db8 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -410,7 +410,7 @@ static int multifd_send_pages(QEMUFile *f)
     MultiFDPages_t *pages = multifd_send_state->pages;
     uint64_t transferred;
 
-    if (atomic_read(&multifd_send_state->exiting)) {
+    if (qatomic_read(&multifd_send_state->exiting)) {
         return -1;
     }
 
@@ -508,7 +508,7 @@ static void multifd_send_terminate_threads(Error *err)
      * threads at the same time, we can end calling this function
      * twice.
      */
-    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
+    if (qatomic_xchg(&multifd_send_state->exiting, 1)) {
         return;
     }
 
@@ -632,7 +632,7 @@ static void *multifd_send_thread(void *opaque)
     while (true) {
         qemu_sem_wait(&p->sem);
 
-        if (atomic_read(&multifd_send_state->exiting)) {
+        if (qatomic_read(&multifd_send_state->exiting)) {
             break;
         }
         qemu_mutex_lock(&p->mutex);
@@ -760,7 +760,7 @@ int multifd_save_setup(Error **errp)
     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
     multifd_send_state->pages = multifd_pages_init(page_count);
     qemu_sem_init(&multifd_send_state->channels_ready, 0);
-    atomic_set(&multifd_send_state->exiting, 0);
+    qatomic_set(&multifd_send_state->exiting, 0);
     multifd_send_state->ops = multifd_ops[migrate_multifd_compression()];
 
     for (i = 0; i < thread_count; i++) {
@@ -997,7 +997,7 @@ int multifd_load_setup(Error **errp)
     thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
     multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
-    atomic_set(&multifd_recv_state->count, 0);
+    qatomic_set(&multifd_recv_state->count, 0);
     qemu_sem_init(&multifd_recv_state->sem_sync, 0);
     multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];
 
@@ -1037,7 +1037,7 @@ bool multifd_recv_all_channels_created(void)
         return true;
     }
 
-    return thread_count == atomic_read(&multifd_recv_state->count);
+    return thread_count == qatomic_read(&multifd_recv_state->count);
 }
 
 /*
@@ -1058,7 +1058,7 @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
         error_propagate_prepend(errp, local_err,
                                 "failed to receive packet"
                                 " via multifd channel %d: ",
-                                atomic_read(&multifd_recv_state->count));
+                                qatomic_read(&multifd_recv_state->count));
         return false;
     }
     trace_multifd_recv_new_channel(id);
@@ -1079,7 +1079,7 @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
     p->running = true;
     qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                        QEMU_THREAD_JOINABLE);
-    atomic_inc(&multifd_recv_state->count);
-    return atomic_read(&multifd_recv_state->count) ==
+    qatomic_inc(&multifd_recv_state->count);
+    return qatomic_read(&multifd_recv_state->count) ==
            migrate_multifd_channels();
 }
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index baf094ba3a..1654ff11a5 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -530,7 +530,7 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
         Error *local_err = NULL;
 
         /* Let the fault thread quit */
-        atomic_set(&mis->fault_thread_quit, 1);
+        qatomic_set(&mis->fault_thread_quit, 1);
         postcopy_fault_thread_notify(mis);
         trace_postcopy_ram_incoming_cleanup_join();
         qemu_thread_join(&mis->fault_thread);
@@ -742,12 +742,12 @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
 
     low_time_offset = get_low_time_offset(dc);
     if (dc->vcpu_addr[cpu] == 0) {
-        atomic_inc(&dc->smp_cpus_down);
+        qatomic_inc(&dc->smp_cpus_down);
     }
 
-    atomic_xchg(&dc->last_begin, low_time_offset);
-    atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
-    atomic_xchg(&dc->vcpu_addr[cpu], addr);
+    qatomic_xchg(&dc->last_begin, low_time_offset);
+    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
+    qatomic_xchg(&dc->vcpu_addr[cpu], addr);
 
     /*
      * check it here, not at the beginning of the function,
@@ -756,9 +756,9 @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
      */
     already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
     if (already_received) {
-        atomic_xchg(&dc->vcpu_addr[cpu], 0);
-        atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
-        atomic_dec(&dc->smp_cpus_down);
+        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
+        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
+        qatomic_dec(&dc->smp_cpus_down);
     }
     trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                         cpu, already_received);
@@ -813,28 +813,28 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
     for (i = 0; i < smp_cpus; i++) {
         uint32_t vcpu_blocktime = 0;
 
-        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
-        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
+        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
+        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
             read_vcpu_time == 0) {
             continue;
         }
-        atomic_xchg(&dc->vcpu_addr[i], 0);
+        qatomic_xchg(&dc->vcpu_addr[i], 0);
         vcpu_blocktime = low_time_offset - read_vcpu_time;
         affected_cpu += 1;
         /* we need to know is that mark_postcopy_end was due to
          * faulted page, another possible case it's prefetched
          * page and in that case we shouldn't be here */
         if (!vcpu_total_blocktime &&
-            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
             vcpu_total_blocktime = true;
         }
         /* continue cycle, due to one page could affect several vCPUs */
         dc->vcpu_blocktime[i] += vcpu_blocktime;
     }
 
-    atomic_sub(&dc->smp_cpus_down, affected_cpu);
+    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
     if (vcpu_total_blocktime) {
-        dc->total_blocktime += low_time_offset - atomic_fetch_add(
+        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                 &dc->last_begin, 0);
     }
     trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
@@ -928,7 +928,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
                 error_report("%s: read() failed", __func__);
             }
 
-            if (atomic_read(&mis->fault_thread_quit)) {
+            if (qatomic_read(&mis->fault_thread_quit)) {
                 trace_postcopy_ram_fault_thread_quit();
                 break;
             }
@@ -1410,13 +1410,13 @@ static PostcopyState incoming_postcopy_state;
 
 PostcopyState postcopy_state_get(void)
 {
-    return atomic_mb_read(&incoming_postcopy_state);
+    return qatomic_mb_read(&incoming_postcopy_state);
 }
 
 /* Set the state and return the old state */
 PostcopyState postcopy_state_set(PostcopyState new_state)
 {
-    return atomic_xchg(&incoming_postcopy_state, new_state);
+    return qatomic_xchg(&incoming_postcopy_state, new_state);
 }
 
 /* Register a handler for external shared memory postcopy
diff --git a/migration/rdma.c b/migration/rdma.c
index 3bd30d46ad..0340841fad 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2678,7 +2678,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
     size_t len = 0;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmaout);
+    rdma = qatomic_rcu_read(&rioc->rdmaout);
 
     if (!rdma) {
         return -EIO;
@@ -2760,7 +2760,7 @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
     size_t done = 0;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmain);
+    rdma = qatomic_rcu_read(&rioc->rdmain);
 
     if (!rdma) {
         return -EIO;
@@ -2875,9 +2875,9 @@ qio_channel_rdma_source_prepare(GSource *source,
 
     RCU_READ_LOCK_GUARD();
     if (rsource->condition == G_IO_IN) {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmain);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
     } else {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
     }
 
     if (!rdma) {
@@ -2902,9 +2902,9 @@ qio_channel_rdma_source_check(GSource *source)
 
     RCU_READ_LOCK_GUARD();
     if (rsource->condition == G_IO_IN) {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmain);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
     } else {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
     }
 
     if (!rdma) {
@@ -2932,9 +2932,9 @@ qio_channel_rdma_source_dispatch(GSource *source,
 
     RCU_READ_LOCK_GUARD();
     if (rsource->condition == G_IO_IN) {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmain);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
     } else {
-        rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
+        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
     }
 
     if (!rdma) {
@@ -3035,12 +3035,12 @@ static int qio_channel_rdma_close(QIOChannel *ioc,
 
     rdmain = rioc->rdmain;
     if (rdmain) {
-        atomic_rcu_set(&rioc->rdmain, NULL);
+        qatomic_rcu_set(&rioc->rdmain, NULL);
     }
 
     rdmaout = rioc->rdmaout;
     if (rdmaout) {
-        atomic_rcu_set(&rioc->rdmaout, NULL);
+        qatomic_rcu_set(&rioc->rdmaout, NULL);
     }
 
     rcu->rdmain = rdmain;
@@ -3060,8 +3060,8 @@ qio_channel_rdma_shutdown(QIOChannel *ioc,
 
     RCU_READ_LOCK_GUARD();
 
-    rdmain = atomic_rcu_read(&rioc->rdmain);
-    rdmaout = atomic_rcu_read(&rioc->rdmain);
+    rdmain = qatomic_rcu_read(&rioc->rdmain);
+    rdmaout = qatomic_rcu_read(&rioc->rdmain);
 
     switch (how) {
     case QIO_CHANNEL_SHUTDOWN_READ:
@@ -3131,7 +3131,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
     int ret;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmaout);
+    rdma = qatomic_rcu_read(&rioc->rdmaout);
 
     if (!rdma) {
         return -EIO;
@@ -3451,7 +3451,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
     int i = 0;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmain);
+    rdma = qatomic_rcu_read(&rioc->rdmain);
 
     if (!rdma) {
         return -EIO;
@@ -3714,7 +3714,7 @@ rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name)
     int found = -1;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmain);
+    rdma = qatomic_rcu_read(&rioc->rdmain);
 
     if (!rdma) {
         return -EIO;
@@ -3762,7 +3762,7 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
     RDMAContext *rdma;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmaout);
+    rdma = qatomic_rcu_read(&rioc->rdmaout);
     if (!rdma) {
         return -EIO;
     }
@@ -3793,7 +3793,7 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
     int ret = 0;
 
     RCU_READ_LOCK_GUARD();
-    rdma = atomic_rcu_read(&rioc->rdmaout);
+    rdma = qatomic_rcu_read(&rioc->rdmaout);
     if (!rdma) {
         return -EIO;
     }