
Merge pull request #23963 from matoro/main

Replace __sync intrinsics with __atomic
Lennart Poettering 2022-07-21 00:10:01 +02:00 committed by GitHub
commit 4a0cb5474e
7 changed files with 99 additions and 45 deletions
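
Note: throughout this commit the legacy __sync_* builtins are replaced with the __atomic_* builtins (available since GCC 4.7), which take an explicit memory-order argument; __ATOMIC_SEQ_CST is used everywhere to keep the old full-barrier behaviour. A standalone sketch of the mapping (illustrative only, not code from the repository):

    /* Illustrative mapping from the legacy __sync builtins to the __atomic builtins. */
    #include <stdbool.h>
    #include <stdio.h>

    static int counter = 0;

    int main(void) {
            /* Old style: returns true if the swap from 0 to 1 happened. */
            /* bool swapped = __sync_bool_compare_and_swap(&counter, 0, 1); */

            /* New style: 'expected' must be an lvalue; on failure the builtin
             * writes the value actually found into it. */
            int expected = 0;
            bool swapped = __atomic_compare_exchange_n(&counter, &expected, 1,
                                                       false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

            /* Old: __sync_fetch_and_add(&counter, 1);  New: */
            __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);

            /* Old: __sync_synchronize();  New: */
            __atomic_thread_fence(__ATOMIC_SEQ_CST);

            printf("swapped=%d counter=%d\n", swapped, counter);
            return 0;
    }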

README

@@ -207,7 +207,8 @@ REQUIREMENTS:
         python >= 3.5
         meson >= 0.53.2
         ninja
-        gcc, awk, sed, grep, and similar tools
+        gcc >= 4.7
+        awk, sed, grep, and similar tools
         clang >= 10.0, llvm >= 10.0 (optional, required to build BPF programs
                                      from source code in C)
         gnu-efi >= 3.0.5 (optional, required for systemd-boot)
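
Note: the new gcc >= 4.7 floor matches the release that introduced the __atomic builtins this commit switches to. A minimal probe of the kind a build system might compile to confirm support (hypothetical, not part of systemd's meson setup):

    /* Hypothetical compile probe: only builds on toolchains that provide the __atomic builtins. */
    int main(void) {
            static int x;
            __atomic_store_n(&x, 1, __ATOMIC_SEQ_CST);
            return __atomic_load_n(&x, __ATOMIC_SEQ_CST) == 1 ? 0 : 1;
    }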


@@ -1147,7 +1147,7 @@ void reset_cached_pid(void) {
 pid_t getpid_cached(void) {
         static bool installed = false;
-        pid_t current_value;
+        pid_t current_value = CACHED_PID_UNSET;

         /* getpid_cached() is much like getpid(), but caches the value in local memory, to avoid having to invoke a
          * system call each time. This restores glibc behaviour from before 2.24, when getpid() was unconditionally
@@ -1158,7 +1158,13 @@ pid_t getpid_cached(void) {
          * https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=c579f48edba88380635ab98cb612030e3ed8691e
          */

-        current_value = __sync_val_compare_and_swap(&cached_pid, CACHED_PID_UNSET, CACHED_PID_BUSY);
+        __atomic_compare_exchange_n(
+                        &cached_pid,
+                        &current_value,
+                        CACHED_PID_BUSY,
+                        false,
+                        __ATOMIC_SEQ_CST,
+                        __ATOMIC_SEQ_CST);

         switch (current_value) {
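
Note: __sync_val_compare_and_swap() returned the value previously stored in cached_pid, whereas __atomic_compare_exchange_n() reports it through its 'expected' pointer. That is why current_value is now pre-initialized to CACHED_PID_UNSET and still holds the observed value for the switch that follows. A standalone sketch of that behaviour (hypothetical variable names):

    /* After the call, 'observed' holds whatever was in 'slot' before the exchange,
     * whether the exchange succeeded or failed. */
    #include <assert.h>
    #include <stdbool.h>

    int main(void) {
            static long slot = 0;      /* stands in for cached_pid     */
            long observed = 0;         /* stands in for current_value  */

            bool won = __atomic_compare_exchange_n(&slot, &observed, -1,
                                                   false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            assert(won && observed == 0);    /* success: observed keeps the expected value */

            observed = 0;
            won = __atomic_compare_exchange_n(&slot, &observed, -2,
                                              false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            assert(!won && observed == -1);  /* failure: observed now holds what was found */
            return 0;
    }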


@@ -28,24 +28,31 @@ static void sigbus_push(void *addr) {
         assert(addr);

         /* Find a free place, increase the number of entries and leave, if we can */
-        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++)
-                if (__sync_bool_compare_and_swap(&sigbus_queue[u], NULL, addr)) {
-                        __sync_fetch_and_add(&n_sigbus_queue, 1);
+        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++) {
+                /* OK to initialize this here since we haven't started the atomic ops yet */
+                void *tmp = NULL;
+                if (__atomic_compare_exchange_n(&sigbus_queue[u], &tmp, addr, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                        __atomic_fetch_add(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
                         return;
                 }
+        }

         /* If we can't, make sure the queue size is out of bounds, to
          * mark it as overflow */
         for (;;) {
-                unsigned c;
+                sig_atomic_t c;

-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;

                 if (c > SIGBUS_QUEUE_MAX) /* already overflow */
                         return;

-                if (__sync_bool_compare_and_swap(&n_sigbus_queue, c, c + SIGBUS_QUEUE_MAX))
+                /* OK if we clobber c here, since we either immediately return
+                 * or it will be immediately reinitialized on next loop */
+                if (__atomic_compare_exchange_n(&n_sigbus_queue, &c, c + SIGBUS_QUEUE_MAX, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                         return;
         }
 }
@@ -56,7 +63,7 @@ int sigbus_pop(void **ret) {
         for (;;) {
                 unsigned u, c;

-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;

                 if (_likely_(c == 0))
@@ -72,8 +79,13 @@ int sigbus_pop(void **ret) {
                         if (!addr)
                                 continue;

-                        if (__sync_bool_compare_and_swap(&sigbus_queue[u], addr, NULL)) {
-                                __sync_fetch_and_sub(&n_sigbus_queue, 1);
+                        /* OK if we clobber addr here, since we either immediately return
+                         * or it will be immediately reinitialized on next loop */
+                        if (__atomic_compare_exchange_n(&sigbus_queue[u], &addr, NULL, false,
+                                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                                __atomic_fetch_sub(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
+                                /* If we successfully entered this if condition, addr won't
+                                 * have been modified since its assignment, so safe to use it */
                                 *ret = addr;
                                 return 1;
                         }
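
Note: the local counter changes from unsigned to sig_atomic_t so that the 'expected' pointer matches the type of n_sigbus_queue, and the new comments flag that __atomic_compare_exchange_n() overwrites the expected variable when it loses a race; that is harmless here because each loop iteration reloads it. A standalone sketch of the same retry idiom (hypothetical names):

    /* On a lost race the builtin writes the current value into 'c'; the next
     * loop iteration simply starts over from fresh data. */
    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t queue_len = 0;   /* stands in for n_sigbus_queue */

    static void mark_overflow(sig_atomic_t max) {
            for (;;) {
                    sig_atomic_t c;

                    __atomic_thread_fence(__ATOMIC_SEQ_CST);
                    c = queue_len;

                    if (c > max)      /* already marked as overflowed */
                            return;

                    /* 'c' may be clobbered with the current value on failure,
                     * but it is reloaded at the top of the loop anyway. */
                    if (__atomic_compare_exchange_n(&queue_len, &c, c + max,
                                                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                            return;
            }
    }

    int main(void) {
            mark_overflow(16);
            printf("queue_len=%d\n", (int) queue_len);
            return 0;
    }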


@@ -106,10 +106,10 @@
  * on this macro will run concurrently to all other code conditionalized
  * the same way, there's no ordering or completion enforced. */
 #define ONCE __ONCE(UNIQ_T(_once_, UNIQ))
-#define __ONCE(o)                                                       \
-        ({                                                              \
-                static bool (o) = false;                                \
-                __sync_bool_compare_and_swap(&(o), false, true);        \
+#define __ONCE(o)                                                       \
+        ({                                                              \
+                static bool (o) = false;                                \
+                __atomic_exchange_n(&(o), true, __ATOMIC_SEQ_CST);      \
         })

 #undef MAX
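
For reference, the two builtins report different things: __sync_bool_compare_and_swap() returns whether the swap took place, while __atomic_exchange_n() unconditionally stores the new value and returns the contents found beforehand. A standalone sketch of the __atomic_exchange_n() return convention (not from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void) {
            bool flag = false;

            /* Returns the previous contents: false on the first call, true afterwards. */
            bool was_set = __atomic_exchange_n(&flag, true, __ATOMIC_SEQ_CST);
            printf("first call:  previous value = %d\n", was_set);

            was_set = __atomic_exchange_n(&flag, true, __ATOMIC_SEQ_CST);
            printf("second call: previous value = %d\n", was_set);
            return 0;
    }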


@@ -166,19 +166,28 @@ static void managed_journal_file_set_offline_internal(ManagedJournalFile *f) {
         for (;;) {
                 switch (f->file->offline_state) {
-                case OFFLINE_CANCEL:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
+                case OFFLINE_CANCEL: {
+                        OfflineState tmp_state = OFFLINE_CANCEL;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_DONE,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         return;

-                case OFFLINE_AGAIN_FROM_SYNCING:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
+                case OFFLINE_AGAIN_FROM_SYNCING: {
+                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_SYNCING;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_SYNCING,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         break;

-                case OFFLINE_AGAIN_FROM_OFFLINING:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
+                case OFFLINE_AGAIN_FROM_OFFLINING: {
+                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_OFFLINING;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_SYNCING,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         break;

                 case OFFLINE_SYNCING:
@@ -189,8 +198,12 @@ static void managed_journal_file_set_offline_internal(ManagedJournalFile *f) {
                         (void) fsync(f->file->fd);

-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
-                                continue;
+                        {
+                                OfflineState tmp_state = OFFLINE_SYNCING;
+                                if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_OFFLINING,
+                                                                 false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+                                        continue;
+                        }

                         f->file->header->state = f->file->archive ? STATE_ARCHIVED : STATE_OFFLINE;
                         (void) fsync(f->file->fd);
@@ -221,9 +234,12 @@ static void managed_journal_file_set_offline_internal(ManagedJournalFile *f) {
                         break;

-                case OFFLINE_OFFLINING:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
+                case OFFLINE_OFFLINING: {
+                        OfflineState tmp_state = OFFLINE_OFFLINING;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_DONE,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         _fallthrough_;
                 case OFFLINE_DONE:
                         return;
@@ -253,19 +269,28 @@ static bool managed_journal_file_set_offline_try_restart(ManagedJournalFile *f) {
                 case OFFLINE_AGAIN_FROM_OFFLINING:
                         return true;

-                case OFFLINE_CANCEL:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
+                case OFFLINE_CANCEL: {
+                        OfflineState tmp_state = OFFLINE_CANCEL;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_AGAIN_FROM_SYNCING,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         return true;

-                case OFFLINE_SYNCING:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
+                case OFFLINE_SYNCING: {
+                        OfflineState tmp_state = OFFLINE_SYNCING;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_AGAIN_FROM_SYNCING,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         return true;

-                case OFFLINE_OFFLINING:
-                        if (!__sync_bool_compare_and_swap(&f->file->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
+                case OFFLINE_OFFLINING: {
+                        OfflineState tmp_state = OFFLINE_OFFLINING;
+                        if (!__atomic_compare_exchange_n(&f->file->offline_state, &tmp_state, OFFLINE_AGAIN_FROM_OFFLINING,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                 continue;
+                }
                         return true;

                 default:
@@ -352,7 +377,7 @@ int managed_journal_file_set_offline(ManagedJournalFile *f, bool wait) {
 bool managed_journal_file_is_offlining(ManagedJournalFile *f) {
         assert(f);

-        __sync_synchronize();
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);

         if (IN_SET(f->file->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
                 return false;
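
Note: __atomic_compare_exchange_n() takes the expected value by pointer and may write through it, so a bare enum constant such as OFFLINE_CANCEL can no longer be passed directly; each case therefore opens a small block with a named tmp_state temporary. A standalone sketch of the pattern (hypothetical enum and helper names):

    /* The expected value must live in a writable object; on failure the builtin
     * stores the state it actually observed into that object. */
    #include <stdbool.h>

    typedef enum { STATE_A, STATE_B, STATE_C } State;

    static State state = STATE_A;

    static bool try_transition(State from, State to) {
            State tmp = from;   /* clobbered with the observed state on failure */
            return __atomic_compare_exchange_n(&state, &tmp, to,
                                               false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void) {
            return try_transition(STATE_A, STATE_B) ? 0 : 1;
    }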


@@ -204,23 +204,32 @@ static int journal_file_set_online(JournalFile *f) {
                         wait = false;
                         break;

-                case OFFLINE_SYNCING:
-                        if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
-                                continue;
+                case OFFLINE_SYNCING: {
+                        OfflineState tmp_state = OFFLINE_SYNCING;
+                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+                                continue;
+                }
                         /* Canceled syncing prior to offlining, no need to wait. */
                         wait = false;
                         break;

-                case OFFLINE_AGAIN_FROM_SYNCING:
-                        if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
-                                continue;
+                case OFFLINE_AGAIN_FROM_SYNCING: {
+                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_SYNCING;
+                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+                                continue;
+                }
                         /* Canceled restart from syncing, no need to wait. */
                         wait = false;
                         break;

-                case OFFLINE_AGAIN_FROM_OFFLINING:
-                        if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
-                                continue;
+                case OFFLINE_AGAIN_FROM_OFFLINING: {
+                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_OFFLINING;
+                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+                                continue;
+                }
                         /* Canceled restart from offlining, must wait for offlining to complete however. */
                         _fallthrough_;
                 default: {
@@ -1810,7 +1819,7 @@ static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
         if (o->object.type != OBJECT_ENTRY)
                 return -EINVAL;

-        __sync_synchronize();
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);

         /* Link up the entry itself */
         r = link_entry_into_array(f,
r = link_entry_into_array(f,
@@ -1910,7 +1919,7 @@ void journal_file_post_change(JournalFile *f) {
          * trigger IN_MODIFY by truncating the journal file to its
          * current size which triggers IN_MODIFY. */

-        __sync_synchronize();
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);

         if (ftruncate(f->fd, f->last_stat.st_size) < 0)
                 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
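
Note: the __sync_synchronize() call sites are replaced one-for-one with __atomic_thread_fence(__ATOMIC_SEQ_CST); both act as full memory barriers. A standalone sketch of the fence-before-flag idiom (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    static int payload;
    static bool ready;

    static void publish(int value) {
            payload = value;
            /* Full barrier, the drop-in replacement for __sync_synchronize(). */
            __atomic_thread_fence(__ATOMIC_SEQ_CST);
            __atomic_store_n(&ready, true, __ATOMIC_RELAXED);
    }

    int main(void) {
            publish(42);
            if (__atomic_load_n(&ready, __ATOMIC_RELAXED))
                    printf("payload=%d\n", payload);
            return 0;
    }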


@@ -58,7 +58,8 @@ retry:
         fd_inc_sndbuf(fd, SNDBUF_SIZE);

-        if (!__sync_bool_compare_and_swap(&fd_plus_one, 0, fd+1)) {
+        if (!__atomic_compare_exchange_n(&fd_plus_one, &(int){0}, fd+1,
+                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                 safe_close(fd);
                 goto retry;
         }
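
Note: here the expected value is supplied as the compound literal &(int){0}, an anonymous writable int, instead of a named temporary; when the exchange fails, another thread has already cached a descriptor, so the freshly opened fd is closed and the code retries. A standalone sketch of the compound-literal form (hypothetical helper name; fd_plus_one as in the original):

    #include <stdbool.h>
    #include <stdio.h>

    static int fd_plus_one;   /* 0 means "no fd cached yet", otherwise fd + 1 */

    static bool publish_fd(int fd) {
            /* Fails (returns false) if another thread already stored an fd;
             * &(int){0} provides the writable expected value of 0. */
            return __atomic_compare_exchange_n(&fd_plus_one, &(int){0}, fd + 1,
                                               false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void) {
            printf("first publish:  %d\n", publish_fd(3));   /* 1: slot was empty   */
            printf("second publish: %d\n", publish_fd(7));   /* 0: already occupied */
            return 0;
    }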