diff --git a/include/sys/thread.h b/include/sys/thread.h
index c7e4d25f5c..01941eca4d 100644
--- a/include/sys/thread.h
+++ b/include/sys/thread.h
@@ -62,8 +62,7 @@ typedef enum {
 #define TDF_SLICEEND 0x00000001 /* run out of time slice */
 #define TDF_NEEDSWITCH 0x00000002 /* must switch on next opportunity */
 #define TDF_NEEDSIGCHK 0x00000004 /* signals were posted for delivery */
-#define TDF_BORROWING 0x00000010 /* priority propagation */
-#define TDF_SLEEPY 0x00000020 /* thread is about to go to sleep */
+#define TDF_BORROWING 0x00000008 /* priority propagation */
 /* TDF_SLP* flags are used internally by sleep queue */
 #define TDF_SLPINTR 0x00000040 /* sleep is interruptible */
 #define TDF_SLPTIMED 0x00000080 /* sleep with timeout */
@@ -142,6 +141,8 @@ typedef struct thread {
 
 thread_t *thread_self(void);
 
+spin_t *thread_lock_set(thread_t *td, spin_t *new);
+
 /*! \brief Initialize first thread in the system. */
 void init_thread0(void);
 
diff --git a/sys/kern/sched.c b/sys/kern/sched.c
index 33802704eb..e6df749a57 100644
--- a/sys/kern/sched.c
+++ b/sys/kern/sched.c
@@ -11,7 +11,7 @@
 #include
 #include
 
-static spin_t sched_lock = SPIN_INITIALIZER(0);
+spin_t sched_lock = SPIN_INITIALIZER(0);
 static runq_t runq;
 static bool sched_active = false;
 
diff --git a/sys/kern/signal.c b/sys/kern/signal.c
index 1ac151d059..0431b1fcc4 100644
--- a/sys/kern/signal.c
+++ b/sys/kern/signal.c
@@ -231,19 +231,19 @@ void sig_kill(proc_t *p, signo_t sig) {
     if (td_is_stopped(td))
       sched_wakeup(td, 0);
   } else {
-    WITH_SPIN_LOCK (td->td_lock) {
-      td->td_flags |= TDF_NEEDSIGCHK;
-      /* If the thread is sleeping interruptibly (!), wake it up, so that it
-       * continues execution and the signal gets delivered soon. */
-      if (td_is_interruptible(td)) {
-        /* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */
-        spin_unlock(td->td_lock);
-        sleepq_abort(td); /* Locks & unlocks td_lock */
-        spin_lock(td->td_lock);
-      } else if (td_is_stopped(td) && continued) {
-        sched_wakeup(td, 0);
-      }
+    spin_lock(td->td_lock);
+    td->td_flags |= TDF_NEEDSIGCHK;
+    /* If the thread is sleeping interruptibly (!), wake it up, so that it
+     * continues execution and the signal gets delivered soon. */
+    if (td_is_interruptible(td)) {
+      /* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */
+      spin_unlock(td->td_lock);
+      sleepq_abort(td); /* Locks & unlocks td_lock */
+      return;
     }
+    if (td_is_stopped(td) && continued)
+      sched_wakeup(td, 0);
+    spin_unlock(td->td_lock);
   }
 }
 
diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c
index 7097b19fbd..f42a57ac40 100644
--- a/sys/kern/sleepq.c
+++ b/sys/kern/sleepq.c
@@ -12,6 +12,8 @@
 #include
 #include
 
+extern spin_t sched_lock;
+
 #define SC_TABLESIZE 256 /* Must be power of 2. */
 #define SC_MASK (SC_TABLESIZE - 1)
 #define SC_SHIFT 8
@@ -107,8 +109,8 @@ static sleepq_t *sq_lookup(sleepq_chain_t *sc, void *wchan) {
   return NULL;
 }
 
-static void sq_enter(thread_t *td, void *wchan, const void *waitpt,
-                     sleep_t sleep) {
+static sleepq_t *sq_enter(thread_t *td, void *wchan, const void *waitpt,
+                          sleep_t sleep) {
   sleepq_chain_t *sc = SC_LOOKUP(wchan);
   assert(sc_owned(sc));
 
@@ -146,75 +148,13 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt,
     td->td_waitpt = waitpt;
     td->td_sleepqueue = NULL;
 
-    /* The thread is about to fall asleep, but it still needs to reach
-     * sched_switch - it may get interrupted on the way, so mark our intent.
-     */
-    td->td_flags |= TDF_SLEEPY;
-
     if (sleep >= SLP_INTR)
       td->td_flags |= TDF_SLPINTR;
     if (sleep >= SLP_TIMED)
       td->td_flags |= TDF_SLPTIMED;
   }
-}
-
-static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) {
-  return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR;
-}
-
-static int sq_suspend(thread_t *td, void *wchan, const void *waitpt,
-                      sleep_t sleep) {
-  int status = 0;
-
-  WITH_SPIN_LOCK (td->td_lock) {
-    if (td->td_flags & TDF_SLEEPY) {
-      td->td_flags &= ~TDF_SLEEPY;
-      if (sq_interrupted_early(td, sleep)) {
-        td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED);
-        status = EINTR;
-      } else {
-        td->td_state = TDS_SLEEPING;
-        sched_switch();
-        spin_lock(td->td_lock);
-      }
-    }
-    /* After wakeup, only one of the following flags may be set:
-     * - TDF_SLPINTR if sleep was aborted,
-     * - TDF_SLPTIMED if sleep has timed out. */
-    if (td->td_flags & TDF_SLPINTR) {
-      td->td_flags &= ~TDF_SLPINTR;
-      status = EINTR;
-    } else if (td->td_flags & TDF_SLPTIMED) {
-      td->td_flags &= ~TDF_SLPTIMED;
-      status = ETIMEDOUT;
-    }
-  }
-
-  return status;
-}
-
-static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) {
-  thread_t *td = thread_self();
-
-  /* If there are pending signals, interrupt the sleep immediately. */
-  WITH_SPIN_LOCK (td->td_lock) {
-    if (sq_interrupted_early(td, sleep))
-      return EINTR;
-  }
-
-  if (waitpt == NULL)
-    waitpt = __caller(0);
-
-  sleepq_chain_t *sc = sc_acquire(wchan);
-  sq_enter(td, wchan, waitpt, sleep);
-  sc_release(sc);
-
-  /* The code can be interrupted in here.
-   * A race is avoided by clever use of TDF_SLEEPY flag.
-   * We can also get a signal in here -- interrupt early if we got one.
-   * The first signal check is an optimization that saves us the call
-   * to sq_enter. */
-  return sq_suspend(td, wchan, waitpt, sleep);
+
+  return sq;
 }
 
@@ -245,27 +185,69 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) {
     TAILQ_REMOVE(&sq->sq_free, sq, sq_entry);
   }
 
-  WITH_SPIN_LOCK (td->td_lock) {
-    td->td_wchan = NULL;
-    td->td_waitpt = NULL;
-    td->td_sleepqueue = sq;
-  }
+  assert(spin_owned(td->td_lock));
+  td->td_wchan = NULL;
+  td->td_waitpt = NULL;
+  td->td_sleepqueue = sq;
+}
+
+static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) {
+  return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR;
 }
 
-static void sq_resume(thread_t *td, int wakeup) {
+static int sq_suspend(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq,
+                      sleep_t sleep) {
+  spin_lock(td->td_lock);
+
+  if (td->td_sleepqueue != NULL) {
+    spin_unlock(td->td_lock);
+    return 0;
+  }
+
+  if (sq_interrupted_early(td, sleep)) {
+    td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED);
+    sq_leave(td, sc, sq);
+    sc_release(sc);
+    spin_unlock(td->td_lock);
+    return EINTR;
+  }
+
+  thread_lock_set(td, &sc->sc_lock);
+  td->td_state = TDS_SLEEPING;
+  sched_switch();
+
   WITH_SPIN_LOCK (td->td_lock) {
-    /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */
-    if (wakeup != EINTR)
+    /* After wakeup, only one of the following flags may be set:
+     * - TDF_SLPINTR if sleep was aborted,
+     * - TDF_SLPTIMED if sleep has timed out. */
+    if (td->td_flags & TDF_SLPINTR) {
       td->td_flags &= ~TDF_SLPINTR;
-    if (wakeup != ETIMEDOUT)
+      return EINTR;
+    }
+    if (td->td_flags & TDF_SLPTIMED) {
       td->td_flags &= ~TDF_SLPTIMED;
-    /* Do not try to wake up a thread that is sleepy but did not fall asleep!
-     */
-    if (td->td_flags & TDF_SLEEPY) {
-      td->td_flags &= ~TDF_SLEEPY;
-    } else {
-      sched_wakeup(td, 0);
+      return ETIMEDOUT;
    }
  }
+
+  return 0;
+}
+
+static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) {
+  thread_t *td = thread_self();
+
+  /* If there are pending signals, interrupt the sleep immediately. */
+  WITH_SPIN_LOCK (td->td_lock) {
+    if (sq_interrupted_early(td, sleep))
+      return EINTR;
+  }
+
+  if (waitpt == NULL)
+    waitpt = __caller(0);
+
+  sleepq_chain_t *sc = sc_acquire(wchan);
+  sleepq_t *sq = sq_enter(td, wchan, waitpt, sleep);
+  return sq_suspend(td, sc, sq, sleep);
 }
 
 /* Remove a thread from the sleep queue and resume it. */
@@ -281,8 +263,18 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq,
   if ((wakeup == ETIMEDOUT) && !(td->td_flags & TDF_SLPTIMED))
     return false;
 
+  assert(td->td_lock == &sc->sc_lock);
+
   sq_leave(td, sc, sq);
-  sq_resume(td, wakeup);
+  /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */
+  if (wakeup != EINTR)
+    td->td_flags &= ~TDF_SLPINTR;
+  if (wakeup != ETIMEDOUT)
+    td->td_flags &= ~TDF_SLPTIMED;
+  WITH_SPIN_LOCK (&sched_lock) {
+    td->td_lock = &sched_lock;
+    sched_wakeup(td, 0);
+  }
 
   return true;
 }
diff --git a/sys/kern/thread.c b/sys/kern/thread.c
index ba53b96225..6be23bf6b6 100644
--- a/sys/kern/thread.c
+++ b/sys/kern/thread.c
@@ -22,6 +22,18 @@ static mtx_t *threads_lock = &MTX_INITIALIZER(0);
 static thread_list_t all_threads = TAILQ_HEAD_INITIALIZER(all_threads);
 static thread_list_t zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
 
+extern spin_t sched_lock;
+
+spin_t *thread_lock_set(thread_t *td, spin_t *new) {
+  spin_t *old;
+  assert(spin_owned(new));
+  old = td->td_lock;
+  assert(spin_owned(old));
+  td->td_lock = new;
+  spin_unlock(old);
+  return old;
+}
+
 /* FTTB such a primitive method of creating new TIDs will do. */
 static tid_t make_tid(void) {
   static volatile tid_t tid = 1;
@@ -83,8 +95,7 @@ thread_t *thread_create(const char *name, void (*fn)(void *), void *arg,
   td->td_prio = prio;
   td->td_base_prio = prio;
 
-  td->td_lock = kmalloc(M_TEMP, sizeof(spin_t), M_ZERO);
-  spin_init(td->td_lock, 0);
+  td->td_lock = &sched_lock;
 
   cv_init(&td->td_waitcv, "thread waiters");
   LIST_INIT(&td->td_contested);
@@ -123,7 +134,6 @@ void thread_delete(thread_t *td) {
   sleepq_destroy(td->td_sleepqueue);
   turnstile_destroy(td->td_turnstile);
   kfree(M_STR, td->td_name);
-  kfree(M_TEMP, td->td_lock);
   pool_free(P_THREAD, td);
 }
 
@@ -159,9 +169,10 @@ __noreturn void thread_exit(void) {
     TAILQ_INSERT_TAIL(&zombie_threads, td, td_zombieq);
   }
 
-  cv_broadcast(&td->td_waitcv);
   spin_unlock(td->td_lock);
 
+  cv_broadcast(&td->td_waitcv);
+
+  spin_lock(td->td_lock);
   td->td_state = TDS_DEAD;
   sched_switch();
 
diff --git a/sys/kern/turnstile.c b/sys/kern/turnstile.c
index 2974ef8c4b..1a7a8fe8a6 100644
--- a/sys/kern/turnstile.c
+++ b/sys/kern/turnstile.c
@@ -131,7 +131,8 @@ static thread_t *acquire_owner(turnstile_t *ts) {
   assert(ts->ts_state == USED_BLOCKED);
   thread_t *td = ts->ts_owner;
   assert(td != NULL); /* Turnstile must have an owner. */
-  spin_lock(td->td_lock);
+  if (!spin_owned(td->td_lock))
+    spin_lock(td->td_lock);
   assert(!td_is_sleeping(td)); /* You must not sleep while holding a mutex. */
   return td;
 }
@@ -170,7 +171,7 @@ static void propagate_priority(thread_t *td) {
     assert(td->td_blocked == NULL);
   }
 
-  spin_unlock(td->td_lock);
+  // spin_unlock(td->td_lock);
 }
 
 void turnstile_adjust(thread_t *td, prio_t oldprio) {
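The crux of this patch is the td_lock hand-off protocol: a thread is protected by whichever spin lock its td_lock pointer currently designates (sched_lock while runnable, the sleepqueue chain lock while asleep), and thread_lock_set() migrates the pointer while both locks are held. The standalone sketch below illustrates the idea; it assumes nothing from the kernel tree -- spin_t, thread_t, spin_lock(), spin_owned() and thread_lock() are toy stand-ins built on C11 atomics, and only thread_lock_set() mirrors the helper added in sys/kern/thread.c.

/* lock_handoff.c - standalone sketch of the td_lock hand-off; NOT kernel
 * code. Build with: cc -std=c11 lock_handoff.c */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_bool locked;
} spin_t;

static void spin_lock(spin_t *s) {
  bool expected = false;
  /* Spin until we flip the flag from false to true. */
  while (!atomic_compare_exchange_weak(&s->locked, &expected, true))
    expected = false;
}

static void spin_unlock(spin_t *s) {
  atomic_store(&s->locked, false);
}

static bool spin_owned(spin_t *s) {
  /* Toy version: the kernel checks ownership, we only check "held". */
  return atomic_load(&s->locked);
}

typedef struct {
  spin_t *_Atomic td_lock; /* lock that currently protects this thread */
} thread_t;

/* Same shape as the patch's thread_lock_set(): with BOTH locks held,
 * repoint td_lock at the new lock, then release and return the old one. */
static spin_t *thread_lock_set(thread_t *td, spin_t *new) {
  assert(spin_owned(new));
  spin_t *old = td->td_lock;
  assert(spin_owned(old));
  td->td_lock = new;
  spin_unlock(old);
  return old;
}

/* What a waker must do: lock whatever td_lock points at and retry if the
 * pointer migrated while we were blocked on the old lock. */
static spin_t *thread_lock(thread_t *td) {
  for (;;) {
    spin_t *lock = td->td_lock;
    spin_lock(lock);
    if (lock == td->td_lock)
      return lock; /* stable: the pointer cannot move while we hold it */
    spin_unlock(lock); /* lost a race with a hand-off, try again */
  }
}

static spin_t sched_lock, chain_lock; /* zero-initialized: both unlocked */

int main(void) {
  thread_t td = {.td_lock = &sched_lock};

  /* Falling asleep (cf. sq_suspend): hold the chain lock and td_lock,
   * then hand td_lock over to the chain lock. */
  spin_lock(&chain_lock);
  spin_lock(td.td_lock);             /* td_lock == &sched_lock here */
  thread_lock_set(&td, &chain_lock); /* drops sched_lock */
  assert(td.td_lock == &chain_lock);

  /* Waking up (cf. sq_wakeup): with the chain lock still held, take
   * sched_lock and point td_lock back at it. */
  spin_lock(&sched_lock);
  td.td_lock = &sched_lock;
  spin_unlock(&sched_lock);
  spin_unlock(&chain_lock);

  spin_t *lock = thread_lock(&td);
  printf("td is protected by %s\n",
         lock == &sched_lock ? "sched_lock" : "chain_lock");
  spin_unlock(lock);
  return 0;
}

Because td_lock may only be repointed while the old lock is held, the retry loop in thread_lock() above is the pattern a waker such as sig_kill() relies on: once it holds the lock that td_lock points at, the pointer can no longer move underneath it.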