New locking scheme for dispatcher & scheduler #756

Closed
wants to merge 15 commits into from
5 changes: 3 additions & 2 deletions include/sys/thread.h
@@ -62,8 +62,7 @@ typedef enum {
#define TDF_SLICEEND 0x00000001 /* run out of time slice */
#define TDF_NEEDSWITCH 0x00000002 /* must switch on next opportunity */
#define TDF_NEEDSIGCHK 0x00000004 /* signals were posted for delivery */
#define TDF_BORROWING 0x00000010 /* priority propagation */
#define TDF_SLEEPY 0x00000020 /* thread is about to go to sleep */
#define TDF_BORROWING 0x00000008 /* priority propagation */
/* TDF_SLP* flags are used internally by sleep queue */
#define TDF_SLPINTR 0x00000040 /* sleep is interruptible */
#define TDF_SLPTIMED 0x00000080 /* sleep with timeout */
@@ -142,6 +141,8 @@ typedef struct thread {

thread_t *thread_self(void);

spin_t *thread_lock_set(thread_t *td, spin_t *new);

/*! \brief Initialize first thread in the system. */
void init_thread0(void);

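The thread_lock_set() declaration added above is implemented in sys/kern/thread.c further down in this diff. Below is a minimal usage sketch of the hand-off it enables, assuming the lock ordering used by the sleepq.c changes in this PR; the function name and body are illustrative only and are not part of this change:

/* Move ownership of td from its current lock (e.g. &sched_lock) to a
 * sleep-queue chain lock. Both spinlocks must be held on entry;
 * thread_lock_set() releases the old lock and returns it. */
static void example_move_to_chain(thread_t *td, sleepq_chain_t *sc) {
  spin_lock(&sc->sc_lock);  /* chain lock first (illustrative ordering) */
  spin_lock(td->td_lock);   /* currently &sched_lock for a runnable thread */
  spin_t *old = thread_lock_set(td, &sc->sc_lock);
  (void)old;                /* the old lock has already been dropped */
  /* td->td_lock now points at sc->sc_lock, which we still hold. */
  spin_unlock(&sc->sc_lock);
}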
2 changes: 1 addition & 1 deletion sys/kern/sched.c
@@ -11,7 +11,7 @@
#include <sys/pcpu.h>
#include <sys/turnstile.h>

static spin_t sched_lock = SPIN_INITIALIZER(0);
spin_t sched_lock = SPIN_INITIALIZER(0);
static runq_t runq;
static bool sched_active = false;

24 changes: 12 additions & 12 deletions sys/kern/signal.c
@@ -231,19 +231,19 @@ void sig_kill(proc_t *p, signo_t sig) {
if (td_is_stopped(td))
sched_wakeup(td, 0);
} else {
WITH_SPIN_LOCK (td->td_lock) {
td->td_flags |= TDF_NEEDSIGCHK;
/* If the thread is sleeping interruptibly (!), wake it up, so that it
* continues execution and the signal gets delivered soon. */
if (td_is_interruptible(td)) {
/* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */
spin_unlock(td->td_lock);
sleepq_abort(td); /* Locks & unlocks td_lock */
spin_lock(td->td_lock);
} else if (td_is_stopped(td) && continued) {
sched_wakeup(td, 0);
}
spin_lock(td->td_lock);
td->td_flags |= TDF_NEEDSIGCHK;
/* If the thread is sleeping interruptibly (!), wake it up, so that it
* continues execution and the signal gets delivered soon. */
if (td_is_interruptible(td)) {
/* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */
spin_unlock(td->td_lock);
sleepq_abort(td); /* Locks & unlocks td_lock */
return;
}
if (td_is_stopped(td) && continued)
sched_wakeup(td, 0);
spin_unlock(td->td_lock);
}
}

152 changes: 72 additions & 80 deletions sys/kern/sleepq.c
@@ -12,6 +12,8 @@
#include <sys/errno.h>
#include <sys/callout.h>

extern spin_t sched_lock;

#define SC_TABLESIZE 256 /* Must be power of 2. */
#define SC_MASK (SC_TABLESIZE - 1)
#define SC_SHIFT 8
@@ -107,8 +109,8 @@ static sleepq_t *sq_lookup(sleepq_chain_t *sc, void *wchan) {
return NULL;
}

static void sq_enter(thread_t *td, void *wchan, const void *waitpt,
sleep_t sleep) {
static sleepq_t *sq_enter(thread_t *td, void *wchan, const void *waitpt,
sleep_t sleep) {
sleepq_chain_t *sc = SC_LOOKUP(wchan);
assert(sc_owned(sc));

@@ -146,75 +148,13 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt,
td->td_waitpt = waitpt;
td->td_sleepqueue = NULL;

/* The thread is about to fall asleep, but it still needs to reach
* sched_switch - it may get interrupted on the way, so mark our intent. */
td->td_flags |= TDF_SLEEPY;

if (sleep >= SLP_INTR)
td->td_flags |= TDF_SLPINTR;
if (sleep >= SLP_TIMED)
td->td_flags |= TDF_SLPTIMED;
}
}

static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) {
return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR;
}

static int sq_suspend(thread_t *td, void *wchan, const void *waitpt,
sleep_t sleep) {
int status = 0;

WITH_SPIN_LOCK (td->td_lock) {
if (td->td_flags & TDF_SLEEPY) {
td->td_flags &= ~TDF_SLEEPY;
if (sq_interrupted_early(td, sleep)) {
td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED);
status = EINTR;
} else {
td->td_state = TDS_SLEEPING;
sched_switch();
spin_lock(td->td_lock);
}
}
/* After wakeup, only one of the following flags may be set:
* - TDF_SLPINTR if sleep was aborted,
* - TDF_SLPTIMED if sleep has timed out. */
if (td->td_flags & TDF_SLPINTR) {
td->td_flags &= ~TDF_SLPINTR;
status = EINTR;
} else if (td->td_flags & TDF_SLPTIMED) {
td->td_flags &= ~TDF_SLPTIMED;
status = ETIMEDOUT;
}
}

return status;
}

static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) {
thread_t *td = thread_self();

/* If there are pending signals, interrupt the sleep immediately. */
WITH_SPIN_LOCK (td->td_lock) {
if (sq_interrupted_early(td, sleep))
return EINTR;
}

if (waitpt == NULL)
waitpt = __caller(0);

sleepq_chain_t *sc = sc_acquire(wchan);
sq_enter(td, wchan, waitpt, sleep);
sc_release(sc);

/* The code can be interrupted in here.
* A race is avoided by clever use of TDF_SLEEPY flag.
* We can also get a signal in here -- interrupt early if we got one.
* The first signal check is an optimization that saves us the call
* to sq_enter. */

return sq_suspend(td, wchan, waitpt, sleep);
return sq;
}

static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) {
@@ -245,27 +185,69 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) {
TAILQ_REMOVE(&sq->sq_free, sq, sq_entry);
}

WITH_SPIN_LOCK (td->td_lock) {
td->td_wchan = NULL;
td->td_waitpt = NULL;
td->td_sleepqueue = sq;
}
assert(spin_owned(td->td_lock));
td->td_wchan = NULL;
td->td_waitpt = NULL;
td->td_sleepqueue = sq;
}

static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) {
return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR;
}

static void sq_resume(thread_t *td, int wakeup) {
static int sq_suspend(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq,
sleep_t sleep) {
spin_lock(td->td_lock);

if (td->td_sleepqueue != NULL) {
spin_unlock(td->td_lock);
return 0;
}

if (sq_interrupted_early(td, sleep)) {
td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED);
sq_leave(td, sc, sq);
sc_release(sc);
spin_unlock(td->td_lock);
return EINTR;
}

thread_lock_set(td, &sc->sc_lock);
td->td_state = TDS_SLEEPING;
sched_switch();

WITH_SPIN_LOCK (td->td_lock) {
/* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */
if (wakeup != EINTR)
/* After wakeup, only one of the following flags may be set:
* - TDF_SLPINTR if sleep was aborted,
* - TDF_SLPTIMED if sleep has timed out. */
if (td->td_flags & TDF_SLPINTR) {
td->td_flags &= ~TDF_SLPINTR;
if (wakeup != ETIMEDOUT)
return EINTR;
}
if (td->td_flags & TDF_SLPTIMED) {
td->td_flags &= ~TDF_SLPTIMED;
/* Do not try to wake up a thread that is sleepy but did not fall asleep! */
if (td->td_flags & TDF_SLEEPY) {
td->td_flags &= ~TDF_SLEEPY;
} else {
sched_wakeup(td, 0);
return ETIMEDOUT;
}
}

return 0;
}

static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) {
thread_t *td = thread_self();

/* If there are pending signals, interrupt the sleep immediately. */
WITH_SPIN_LOCK (td->td_lock) {
if (sq_interrupted_early(td, sleep))
return EINTR;
}

if (waitpt == NULL)
waitpt = __caller(0);

sleepq_chain_t *sc = sc_acquire(wchan);
sleepq_t *sq = sq_enter(td, wchan, waitpt, sleep);
return sq_suspend(td, sc, sq, sleep);
}

/* Remove a thread from the sleep queue and resume it. */
@@ -281,8 +263,18 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq,
if ((wakeup == ETIMEDOUT) && !(td->td_flags & TDF_SLPTIMED))
return false;

assert(td->td_lock == &sc->sc_lock);

sq_leave(td, sc, sq);
sq_resume(td, wakeup);
/* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */
if (wakeup != EINTR)
td->td_flags &= ~TDF_SLPINTR;
if (wakeup != ETIMEDOUT)
td->td_flags &= ~TDF_SLPTIMED;
WITH_SPIN_LOCK (&sched_lock) {
td->td_lock = &sched_lock;
sched_wakeup(td, 0);
}

return true;
}
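Taken together with the thread.c changes below (a new thread starts with td_lock pointing at sched_lock), the sleepq.c diff above moves a thread's lock between exactly two owners. A sketch of the resulting rule follows; the helper does not exist in the tree and deliberately leaves out turnstile-blocked threads:

/* Which spinlock is expected to guard td under the new scheme (sketch only).
 * sc is the chain td sleeps on, i.e. SC_LOOKUP(td->td_wchan). */
static spin_t *td_guarding_lock(thread_t *td, sleepq_chain_t *sc) {
  if (td->td_state == TDS_SLEEPING)
    return &sc->sc_lock; /* asleep: protected by its sleep-queue chain lock */
  return &sched_lock;    /* runnable or running: protected by the scheduler lock */
}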
19 changes: 15 additions & 4 deletions sys/kern/thread.c
@@ -22,6 +22,18 @@ static mtx_t *threads_lock = &MTX_INITIALIZER(0);
static thread_list_t all_threads = TAILQ_HEAD_INITIALIZER(all_threads);
static thread_list_t zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);

extern spin_t sched_lock;

spin_t *thread_lock_set(thread_t *td, spin_t *new) {
spin_t *old;
assert(spin_owned(new));
old = td->td_lock;
assert(spin_owned(old));
td->td_lock = new;
spin_unlock(old);
return old;
}

/* FTTB such a primitive method of creating new TIDs will do. */
static tid_t make_tid(void) {
static volatile tid_t tid = 1;
@@ -83,8 +95,7 @@ thread_t *thread_create(const char *name, void (*fn)(void *), void *arg,
td->td_prio = prio;
td->td_base_prio = prio;

td->td_lock = kmalloc(M_TEMP, sizeof(spin_t), M_ZERO);
spin_init(td->td_lock, 0);
td->td_lock = &sched_lock;

cv_init(&td->td_waitcv, "thread waiters");
LIST_INIT(&td->td_contested);
@@ -123,7 +134,6 @@ void thread_delete(thread_t *td) {
sleepq_destroy(td->td_sleepqueue);
turnstile_destroy(td->td_turnstile);
kfree(M_STR, td->td_name);
kfree(M_TEMP, td->td_lock);
pool_free(P_THREAD, td);
}

@@ -159,9 +169,10 @@ __noreturn void thread_exit(void) {
TAILQ_INSERT_TAIL(&zombie_threads, td, td_zombieq);
}

cv_broadcast(&td->td_waitcv);
spin_unlock(td->td_lock);

cv_broadcast(&td->td_waitcv);

spin_lock(td->td_lock);
td->td_state = TDS_DEAD;
sched_switch();
5 changes: 3 additions & 2 deletions sys/kern/turnstile.c
@@ -131,7 +131,8 @@ static thread_t *acquire_owner(turnstile_t *ts) {
assert(ts->ts_state == USED_BLOCKED);
thread_t *td = ts->ts_owner;
assert(td != NULL); /* Turnstile must have an owner. */
spin_lock(td->td_lock);
if (!spin_owned(td->td_lock))
spin_lock(td->td_lock);
assert(!td_is_sleeping(td)); /* You must not sleep while holding a mutex. */
return td;
}
@@ -170,7 +171,7 @@ static void propagate_priority(thread_t *td) {
assert(td->td_blocked == NULL);
}

spin_unlock(td->td_lock);
// spin_unlock(td->td_lock);
}

void turnstile_adjust(thread_t *td, prio_t oldprio) {