diff --git a/Userland/Libraries/LibC/sys/types.h b/Userland/Libraries/LibC/sys/types.h
index 0eb5967987e29bc..180179c33f0bfe4 100644
--- a/Userland/Libraries/LibC/sys/types.h
+++ b/Userland/Libraries/LibC/sys/types.h
@@ -75,8 +75,8 @@ typedef struct __pthread_mutexattr_t {
 } pthread_mutexattr_t;
 
 typedef struct __pthread_cond_t {
+    pthread_mutex_t* mutex;
     uint32_t value;
-    uint32_t previous;
     int clockid; // clockid_t
 } pthread_cond_t;
 
diff --git a/Userland/Libraries/LibPthread/CMakeLists.txt b/Userland/Libraries/LibPthread/CMakeLists.txt
index 74896b497616f57..bb6c95319b8ff1a 100644
--- a/Userland/Libraries/LibPthread/CMakeLists.txt
+++ b/Userland/Libraries/LibPthread/CMakeLists.txt
@@ -1,6 +1,7 @@
 set(SOURCES
     forward.cpp
     pthread.cpp
+    pthread_cond.cpp
     pthread_once.cpp
     semaphore.cpp
 )
diff --git a/Userland/Libraries/LibPthread/pthread.cpp b/Userland/Libraries/LibPthread/pthread.cpp
index 84b2a14329f2426..18b758a4306cd86 100644
--- a/Userland/Libraries/LibPthread/pthread.cpp
+++ b/Userland/Libraries/LibPthread/pthread.cpp
@@ -445,78 +445,6 @@ int pthread_setschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] in
     return 0;
 }
 
-int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
-{
-    cond->value = 0;
-    cond->previous = 0;
-    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
-    return 0;
-}
-
-int pthread_cond_destroy(pthread_cond_t*)
-{
-    return 0;
-}
-
-static int cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
-{
-    u32 value = cond->value;
-    cond->previous = value;
-    pthread_mutex_unlock(mutex);
-    int rc = futex_wait(&cond->value, value, abstime, cond->clockid);
-    pthread_mutex_lock(mutex);
-    if (rc < 0 && errno != EAGAIN)
-        return errno;
-    return 0;
-}
-
-int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
-{
-    int rc = cond_wait(cond, mutex, nullptr);
-    VERIFY(rc == 0);
-    return 0;
-}
-
-int pthread_condattr_init(pthread_condattr_t* attr)
-{
-    attr->clockid = CLOCK_MONOTONIC_COARSE;
-    return 0;
-}
-
-int pthread_condattr_destroy(pthread_condattr_t*)
-{
-    return 0;
-}
-
-int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
-{
-    attr->clockid = clock;
-    return 0;
-}
-
-int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
-{
-    return cond_wait(cond, mutex, abstime);
-}
-
-int pthread_cond_signal(pthread_cond_t* cond)
-{
-    u32 value = cond->previous + 1;
-    cond->value = value;
-    int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr, nullptr, 0);
-    VERIFY(rc >= 0);
-    return 0;
-}
-
-int pthread_cond_broadcast(pthread_cond_t* cond)
-{
-    u32 value = cond->previous + 1;
-    cond->value = value;
-    int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr, nullptr, 0);
-    VERIFY(rc >= 0);
-    return 0;
-}
-
 // libgcc expects this function to exist in libpthread, even
 // if it is not implemented.
 int pthread_cancel(pthread_t)
diff --git a/Userland/Libraries/LibPthread/pthread_cond.cpp b/Userland/Libraries/LibPthread/pthread_cond.cpp
new file mode 100644
index 000000000000000..2c1178bcc30a6dd
--- /dev/null
+++ b/Userland/Libraries/LibPthread/pthread_cond.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2021, Sergey Bugaev
+ * Copyright (c) 2019, Andreas Kling
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Assertions.h>
+#include <AK/Atomic.h>
+#include <AK/Types.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <serenity.h>
+#include <time.h>
+
+// Condition variable attributes.
+
+int pthread_condattr_init(pthread_condattr_t* attr)
+{
+    attr->clockid = CLOCK_MONOTONIC_COARSE;
+    return 0;
+}
+
+int pthread_condattr_destroy(pthread_condattr_t*)
+{
+    return 0;
+}
+
+int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
+{
+    attr->clockid = clock;
+    return 0;
+}
+
+// Condition variables.
+
+// cond->value is the generation number (number of times the variable has been
+// signaled) multiplied by INCREMENT, or'ed with the NEED_TO_WAKE flags. It's
+// done this way instead of putting the flags into the high bits because the
+// sequence number can easily overflow, which is completely fine but should not
+// cause it to corrupt the flags.
+static constexpr u32 NEED_TO_WAKE_ONE = 1;
+static constexpr u32 NEED_TO_WAKE_ALL = 2;
+static constexpr u32 INCREMENT = 4;
+
+int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr)
+{
+    cond->mutex = nullptr;
+    cond->value = 0;
+    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
+    return 0;
+}
+
+int pthread_cond_destroy(pthread_cond_t*)
+{
+    return 0;
+}
+
+int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
+{
+    return pthread_cond_timedwait(cond, mutex, nullptr);
+}
+
+int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
+{
+    // Save the mutex this condition variable is associated with. We don't (yet)
+    // support changing this mutex once set.
+    pthread_mutex_t* old_mutex = AK::atomic_exchange(&cond->mutex, mutex, AK::memory_order_relaxed);
+    if (old_mutex && old_mutex != mutex)
+        TODO();
+
+    // Fetch the current value, and record that we're about to wait. Fetching
+    // the current value has to be done while we hold the mutex, because the
+    // value might change as soon as we unlock it.
+    u32 value = AK::atomic_fetch_or(&cond->value, NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL, AK::memory_order_relaxed) | NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL;
+    pthread_mutex_unlock(mutex);
+    int rc = futex_wait(&cond->value, value, abstime, cond->clockid);
+    if (rc < 0 && errno != EAGAIN)
+        return errno;
+
+    // We might have been re-queued onto the mutex while we were sleeping. Take
+    // the pessimistic locking path.
+    __pthread_mutex_lock_pessimistic_np(mutex);
+
+    return 0;
+}
+
+int pthread_cond_signal(pthread_cond_t* cond)
+{
+    // Increment the generation.
+    u32 value = AK::atomic_fetch_add(&cond->value, INCREMENT, AK::memory_order_relaxed);
+    // Fast path: nobody's waiting (or at least, nobody has to be woken).
+    if (!(value & NEED_TO_WAKE_ONE)) [[likely]]
+        return 0;
+    // Wake someone, and clear the NEED_TO_WAKE_ONE flag if there was nobody for
+    // us to wake, to take the fast path the next time. Since we only learn
+    // whether there has been somebody waiting or not after we have tried to
+    // wake them, it would make sense for us to clear the flag after trying to
+    // wake someone up and seeing there was nobody waiting; but that would race
+    // with somebody else setting the flag. Therefore, we do it like this:
+    // attempt to clear the flag first...
+    value = AK::atomic_fetch_and(&cond->value, ~NEED_TO_WAKE_ONE, AK::memory_order_relaxed);
+    // ...check if it was already cleared by someone else...
+    if (!(value & NEED_TO_WAKE_ONE)) [[likely]]
+        return 0;
+    // ...try to wake someone...
+    int rc = futex_wake(&cond->value, 1);
+    VERIFY(rc >= 0);
+    // ...and if we have woken someone, put the flag back.
+    if (rc > 0)
+        AK::atomic_fetch_or(&cond->value, NEED_TO_WAKE_ONE, AK::memory_order_relaxed);
+
+    return 0;
+}
+
+int pthread_cond_broadcast(pthread_cond_t* cond)
+{
+    // Increment the generation.
+    u32 value = AK::atomic_fetch_add(&cond->value, INCREMENT, AK::memory_order_relaxed);
+    // Fast path: nobody's waiting (or at least, nobody has to be woken).
+    if (!(value & NEED_TO_WAKE_ALL)) [[likely]]
+        return 0;
+
+    AK::atomic_fetch_and(&cond->value, ~(NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL), AK::memory_order_relaxed);
+
+    pthread_mutex_t* mutex = AK::atomic_load(&cond->mutex, AK::memory_order_relaxed);
+    VERIFY(mutex);
+
+    int rc = futex(&cond->value, FUTEX_REQUEUE, 1, nullptr, &mutex->lock, INT_MAX);
+    VERIFY(rc == 0);
+    return 0;
+}
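
Not part of the patch, for illustration only: a minimal sketch of the usage pattern the rewritten pthread_cond_t has to support, with the predicate re-checked in a loop while the mutex is held. All identifiers here (g_lock, g_ready_cond, g_ready, producer, consumer) are hypothetical; only the pthread_* calls are the API implemented above.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_lock;
static pthread_cond_t g_ready_cond;
static bool g_ready = false;

static void* producer(void*)
{
    pthread_mutex_lock(&g_lock);
    g_ready = true;
    // Bumps the generation; only issues a futex wake if NEED_TO_WAKE_ONE is set.
    pthread_cond_signal(&g_ready_cond);
    pthread_mutex_unlock(&g_lock);
    return nullptr;
}

static void* consumer(void*)
{
    pthread_mutex_lock(&g_lock);
    // Re-check the predicate in a loop: the wait can return without the state
    // having changed, and a broadcast may requeue this thread onto the mutex.
    while (!g_ready)
        pthread_cond_wait(&g_ready_cond, &g_lock);
    pthread_mutex_unlock(&g_lock);
    printf("ready\n");
    return nullptr;
}

int main()
{
    pthread_mutex_init(&g_lock, nullptr);
    pthread_cond_init(&g_ready_cond, nullptr);

    pthread_t consumer_thread;
    pthread_t producer_thread;
    pthread_create(&consumer_thread, nullptr, consumer, nullptr);
    pthread_create(&producer_thread, nullptr, producer, nullptr);
    pthread_join(consumer_thread, nullptr);
    pthread_join(producer_thread, nullptr);

    pthread_cond_destroy(&g_ready_cond);
    pthread_mutex_destroy(&g_lock);
    return 0;
}

The FUTEX_REQUEUE in pthread_cond_broadcast is what keeps this pattern cheap under contention: one waiter is woken directly and the rest are moved onto mutex->lock, so they resume one at a time as the mutex becomes available instead of all waking at once and fighting for it, which is also why a waiter retakes the mutex through __pthread_mutex_lock_pessimistic_np after futex_wait returns.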