From 7f6607d2364bf1f32b16b0a73acfb83c9c3fb62e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 15:14:57 +0200 Subject: [PATCH 01/11] Change td_lock to volatile pointer to spin lock. --- include/sys/thread.h | 4 ++-- sys/kern/exception.c | 2 +- sys/kern/proc.c | 2 +- sys/kern/sched.c | 20 ++++++++++---------- sys/kern/signal.c | 16 ++++++++-------- sys/kern/sleepq.c | 10 +++++----- sys/kern/thread.c | 26 ++++++++++++++------------ sys/kern/turnstile.c | 14 +++++++------- sys/tests/turnstile_adjust.c | 2 +- 9 files changed, 49 insertions(+), 47 deletions(-) diff --git a/include/sys/thread.h b/include/sys/thread.h index 28aa2b8b5b..fd0ea5cdec 100644 --- a/include/sys/thread.h +++ b/include/sys/thread.h @@ -93,8 +93,8 @@ typedef enum { */ typedef struct thread { /* locking */ - spin_t td_lock; /*!< (~) used by dispatcher & scheduler */ - condvar_t td_waitcv; /*!< (t) for thread_join */ + spin_t *volatile td_lock; /*!< (~) used by dispatcher & scheduler */ + condvar_t td_waitcv; /*!< (t) for thread_join */ /* linked lists */ TAILQ_ENTRY(thread) td_all; /* (a) link on all threads list */ TAILQ_ENTRY(thread) td_runq; /* ($) link on run queue */ diff --git a/sys/kern/exception.c b/sys/kern/exception.c index 89cee3b406..7323a64be8 100644 --- a/sys/kern/exception.c +++ b/sys/kern/exception.c @@ -11,7 +11,7 @@ void on_exc_leave(void) { thread_t *td = thread_self(); if (td->td_flags & TDF_NEEDSWITCH) { - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_state = TDS_READY; sched_switch(); } diff --git a/sys/kern/proc.c b/sys/kern/proc.c index e6e4836953..cc31a1705f 100644 --- a/sys/kern/proc.c +++ b/sys/kern/proc.c @@ -360,7 +360,7 @@ proc_t *proc_create(thread_t *td, proc_t *parent) { TAILQ_INIT(CHILDREN(p)); - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) td->td_proc = p; return p; diff --git a/sys/kern/sched.c b/sys/kern/sched.c index d7916713dd..3d25038855 100644 --- a/sys/kern/sched.c +++ b/sys/kern/sched.c @@ -23,12 +23,12 @@ void init_sched(void) { void sched_add(thread_t *td) { klog("Add thread %ld {%p} to scheduler", td->td_tid, td); - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) sched_wakeup(td, 0); } void sched_wakeup(thread_t *td, long reason) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); assert(td != thread_self()); assert(!td_is_running(td)); @@ -55,7 +55,7 @@ void sched_wakeup(thread_t *td, long reason) { * \note Must be called with \a td_spin acquired! 
*/ static void sched_set_active_prio(thread_t *td, prio_t prio) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); if (prio_eq(td->td_prio, prio)) return; @@ -71,7 +71,7 @@ static void sched_set_active_prio(thread_t *td, prio_t prio) { } void sched_set_prio(thread_t *td, prio_t prio) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); td->td_base_prio = prio; @@ -89,7 +89,7 @@ void sched_set_prio(thread_t *td, prio_t prio) { } void sched_lend_prio(thread_t *td, prio_t prio) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); assert(prio_lt(td->td_prio, prio)); td->td_flags |= TDF_BORROWING; @@ -97,7 +97,7 @@ void sched_lend_prio(thread_t *td, prio_t prio) { } void sched_unlend_prio(thread_t *td, prio_t prio) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); if (prio_le(prio, td->td_base_prio)) { td->td_flags &= ~TDF_BORROWING; @@ -126,7 +126,7 @@ long sched_switch(void) { thread_t *td = thread_self(); - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); assert(!td_is_running(td)); td->td_flags &= ~(TDF_SLICEEND | TDF_NEEDSWITCH); @@ -170,7 +170,7 @@ void sched_clock(void) { thread_t *td = thread_self(); if (td != PCPU_GET(idle_thread)) { - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { if (--td->td_slice <= 0) td->td_flags |= TDF_NEEDSWITCH | TDF_SLICEEND; } @@ -191,7 +191,7 @@ __noreturn void sched_run(void) { sched_active = true; while (true) { - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) td->td_flags |= TDF_NEEDSWITCH; } } @@ -202,7 +202,7 @@ void sched_maybe_preempt(void) { thread_t *td = thread_self(); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { if (td->td_flags & TDF_NEEDSWITCH) { td->td_state = TDS_READY; sched_switch(); diff --git a/sys/kern/signal.c b/sys/kern/signal.c index 12ec15bf27..4ada3aa0a5 100644 --- a/sys/kern/signal.c +++ b/sys/kern/signal.c @@ -134,7 +134,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oset) { } if (sig_pending(td)) { - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) td->td_flags |= TDF_NEEDSIGCHK; } @@ -158,7 +158,7 @@ int do_sigsuspend(proc_t *p, const sigset_t *mask) { * is set (without checking whether there's an actual pending signal), * so we clear the flag here if there are no real pending signals. */ - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { if (sig_pending(td)) td->td_flags |= TDF_NEEDSIGCHK; else @@ -227,19 +227,19 @@ void sig_kill(proc_t *p, signo_t sig) { * Exception: SIGCONT wakes up stopped threads even if it's blocked. */ if (__sigismember(&td->td_sigmask, sig)) { if (continued) - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) if (td_is_stopped(td)) sched_wakeup(td, 0); } else { - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_flags |= TDF_NEEDSIGCHK; /* If the thread is sleeping interruptibly (!), wake it up, so that it * continues execution and the signal gets delivered soon. */ if (td_is_interruptible(td)) { /* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */ - spin_unlock(&td->td_lock); + spin_unlock(td->td_lock); sleepq_abort(td); /* Locks & unlocks td_lock */ - spin_lock(&td->td_lock); + spin_lock(td->td_lock); } else if (td_is_stopped(td) && continued) { sched_wakeup(td, 0); } @@ -258,7 +258,7 @@ int sig_check(thread_t *td) { sig = sig_pending(td); if (sig == 0) { /* No pending signals, signal checking done. 
*/ - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) td->td_flags &= ~TDF_NEEDSIGCHK; return 0; } @@ -309,7 +309,7 @@ void sig_post(signo_t sig) { mtx_unlock(all_proc_mtx); proc_lock(p); if (p->p_state == PS_STOPPED) { - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_state = TDS_STOPPED; /* We're holding a spinlock, so we can't be preempted here. */ /* Release locks before switching out! */ diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index 0dac96a43f..d0f2c7d8bb 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -154,7 +154,7 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_sleepq); sq->sq_nblocked++; - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_wchan = wchan; td->td_waitpt = waitpt; td->td_sleepqueue = NULL; @@ -202,7 +202,7 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { TAILQ_REMOVE(&sq->sq_free, sq, sq_entry); } - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_wchan = NULL; td->td_waitpt = NULL; td->td_sleepqueue = sq; @@ -218,7 +218,7 @@ static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { int status = 0; /* If there are pending signals, interrupt the sleep immediately. */ - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { if (_sleepq_interrupted_early(td, sleep)) return EINTR; } @@ -234,7 +234,7 @@ static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { * The first signal check is an optimization that saves us the call * to sq_enter. */ - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { if (td->td_flags & TDF_SLEEPY) { td->td_flags &= ~TDF_SLEEPY; if (_sleepq_interrupted_early(td, sleep)) { @@ -275,7 +275,7 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, sq_leave(td, sc, sq); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */ if (wakeup != EINTR) td->td_flags &= ~TDF_SLPINTR; diff --git a/sys/kern/thread.c b/sys/kern/thread.c index 41e4ee1bbf..f841d99d1c 100644 --- a/sys/kern/thread.c +++ b/sys/kern/thread.c @@ -33,7 +33,7 @@ static alignas(PAGESIZE) uint8_t _stack0[PAGESIZE]; /* Thread Zero is initially running with interrupts disabled! 
*/ thread_t thread0 = { - .td_lock = SPIN_INITIALIZER(0), + .td_lock = &SPIN_INITIALIZER(0), .td_name = "thread0", .td_tid = 0, .td_prio = 255, @@ -84,7 +84,8 @@ thread_t *thread_create(const char *name, void (*fn)(void *), void *arg, td->td_prio = prio; td->td_base_prio = prio; - td->td_lock = SPIN_INITIALIZER(0); + td->td_lock = kmalloc(M_TEMP, sizeof(spin_t), M_ZERO); + spin_init(td->td_lock, 0); cv_init(&td->td_waitcv, "thread waiters"); LIST_INIT(&td->td_contested); @@ -123,6 +124,7 @@ void thread_delete(thread_t *td) { sleepq_destroy(td->td_sleepqueue); turnstile_destroy(td->td_turnstile); kfree(M_STR, td->td_name); + kfree(M_TEMP, td->td_lock); pool_free(P_THREAD, td); } @@ -154,14 +156,14 @@ __noreturn void thread_exit(void) { preempt_disable(); WITH_MTX_LOCK (threads_lock) { - spin_lock(&td->td_lock); /* force threads_lock >> thread_t::td_lock order */ + spin_lock(td->td_lock); /* force threads_lock >> thread_t::td_lock order */ TAILQ_INSERT_TAIL(&zombie_threads, td, td_zombieq); } cv_broadcast(&td->td_waitcv); - spin_unlock(&td->td_lock); + spin_unlock(td->td_lock); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_state = TDS_DEAD; sched_switch(); } @@ -172,18 +174,18 @@ __noreturn void thread_exit(void) { void thread_join(thread_t *otd) { thread_t *td = thread_self(); - SCOPED_SPIN_LOCK(&otd->td_lock); - klog("Join %ld {%p} with %ld {%p}", td->td_tid, td, otd->td_tid, otd); - while (!td_is_dead(otd)) - cv_wait(&otd->td_waitcv, &otd->td_lock); + WITH_SPIN_LOCK (otd->td_lock) { + while (!td_is_dead(otd)) + cv_wait(&otd->td_waitcv, otd->td_lock); + } } void thread_yield(void) { thread_t *td = thread_self(); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_state = TDS_READY; sched_switch(); } @@ -196,10 +198,10 @@ thread_t *thread_find(tid_t id) { thread_t *td; TAILQ_FOREACH (td, &all_threads, td_all) { - spin_lock(&td->td_lock); + spin_lock(td->td_lock); if (td->td_tid == id) return td; - spin_unlock(&td->td_lock); + spin_unlock(td->td_lock); } return NULL; } diff --git a/sys/kern/turnstile.c b/sys/kern/turnstile.c index a161ac22f7..44dddc25bb 100644 --- a/sys/kern/turnstile.c +++ b/sys/kern/turnstile.c @@ -129,7 +129,7 @@ static thread_t *acquire_owner(turnstile_t *ts) { assert(ts->ts_state == USED_BLOCKED); thread_t *td = ts->ts_owner; assert(td != NULL); /* Turnstile must have an owner. */ - spin_lock(&td->td_lock); + spin_lock(td->td_lock); assert(!td_is_sleeping(td)); /* You must not sleep while holding a mutex. */ return td; } @@ -157,7 +157,7 @@ static void propagate_priority(thread_t *td) { /* Resort td on the blocked list if needed. 
*/ adjust_thread(ts, td, oldprio); - spin_unlock(&td->td_lock); + spin_unlock(td->td_lock); td = acquire_owner(ts); } @@ -168,11 +168,11 @@ static void propagate_priority(thread_t *td) { assert(td->td_blocked == NULL); } - spin_unlock(&td->td_lock); + spin_unlock(td->td_lock); } void turnstile_adjust(thread_t *td, prio_t oldprio) { - assert(spin_owned(&td->td_lock)); + assert(spin_owned(td->td_lock)); assert(td_is_blocked(td)); turnstile_t *ts = td->td_blocked; @@ -232,7 +232,7 @@ static void switch_away(turnstile_t *ts, const void *waitpt) { assert(ts->ts_state == USED_BLOCKED); thread_t *td = thread_self(); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { td->td_turnstile = NULL; td->td_blocked = ts; td->td_wchan = ts->ts_wchan; @@ -300,7 +300,7 @@ static void unlend_self(turnstile_t *ts) { prio = p; } - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) sched_unlend_prio(td, prio); } @@ -309,7 +309,7 @@ static void wakeup_blocked(td_queue_t *blocked_threads) { thread_t *td = TAILQ_FIRST(blocked_threads); TAILQ_REMOVE(blocked_threads, td, td_blockedq); - WITH_SPIN_LOCK (&td->td_lock) { + WITH_SPIN_LOCK (td->td_lock) { assert(td_is_blocked(td)); td->td_blocked = NULL; td->td_wchan = NULL; diff --git a/sys/tests/turnstile_adjust.c b/sys/tests/turnstile_adjust.c index a1d97a21b7..07dbf447a2 100644 --- a/sys/tests/turnstile_adjust.c +++ b/sys/tests/turnstile_adjust.c @@ -20,7 +20,7 @@ static mtx_t ts_adj_mtx = MTX_INITIALIZER(0); static thread_t *threads[T]; static void set_prio(thread_t *td, prio_t prio) { - WITH_SPIN_LOCK (&td->td_lock) + WITH_SPIN_LOCK (td->td_lock) sched_set_prio(td, prio); } From 94ef4460bdba5218a0fd1d5dff8c844fa993250e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 15:36:05 +0200 Subject: [PATCH 02/11] Fix gdb backtrace for ctx_switch. 
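
ctx_save stores the thread context with hand-written sw instructions,
so there is no DWARF unwind information for the frame it builds and
gdb cannot walk a backtrace past ctx_switch. Pair each store of a
callee-saved register with a .cfi_rel_offset directive (the new
SAVE_REG_CFI macro) and mark the CFA with .cfi_def_cfa right after the
stack pointer is adjusted. Since ra is recorded at CTX_PC, the
unwinder can now recover the return address and continue into the
switched-out thread's frames.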
--- sys/mips/switch.S | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/sys/mips/switch.S b/sys/mips/switch.S index 43277dc3e1..903366cf26 100644 --- a/sys/mips/switch.S +++ b/sys/mips/switch.S @@ -10,25 +10,29 @@ .local ctx_resume .local ctx_save +#define SAVE_REG_CFI(reg, offset, base) \ + sw reg, (CTX_##offset)(base); \ + .cfi_rel_offset reg, (CTX_##offset) + #define SAVE_REG(reg, offset, base) sw reg, (CTX_##offset)(base) #define LOAD_REG(reg, offset, base) lw reg, (CTX_##offset)(base) #define SAVE_CTX(_sr, _sp, reg) \ SAVE_REG(_sr, SR, reg); \ - SAVE_REG(ra, PC, reg); \ - SAVE_REG(fp, FP, reg); \ + SAVE_REG_CFI(ra, PC, reg); \ + SAVE_REG_CFI(fp, FP, reg); \ SAVE_REG(_sp, SP, reg); \ - SAVE_REG(gp, GP, reg); \ + SAVE_REG_CFI(gp, GP, reg); \ SAVE_REG(zero, V0, reg); \ - SAVE_REG(s0, S0, reg); \ - SAVE_REG(s1, S1, reg); \ - SAVE_REG(s2, S2, reg); \ - SAVE_REG(s3, S3, reg); \ - SAVE_REG(s4, S4, reg); \ - SAVE_REG(s5, S5, reg); \ - SAVE_REG(s6, S6, reg); \ - SAVE_REG(s7, S7, reg) + SAVE_REG_CFI(s0, S0, reg); \ + SAVE_REG_CFI(s1, S1, reg); \ + SAVE_REG_CFI(s2, S2, reg); \ + SAVE_REG_CFI(s3, S3, reg); \ + SAVE_REG_CFI(s4, S4, reg); \ + SAVE_REG_CFI(s5, S5, reg); \ + SAVE_REG_CFI(s6, S6, reg); \ + SAVE_REG_CFI(s7, S7, reg) #define LOAD_CTX(_sr, reg) \ LOAD_REG(_sr, SR, reg); \ @@ -65,7 +69,9 @@ ctx_save: # save context of @from thread move t1, sp subu sp, CTX_SIZE + .cfi_def_cfa sp, 0 SAVE_CTX(t0, t1, sp) + .cfi_rel_offset sp, CTX_SP sw sp, TD_KCTX(a0) move s0, a0 # save @from pointer From a2fd6b016994c1d31b5c5d5b0484301bdeeae395 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 15:45:47 +0200 Subject: [PATCH 03/11] Fix ctx_switch - now it dereferences a pointer to lock. --- sys/mips/switch.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sys/mips/switch.S b/sys/mips/switch.S index 903366cf26..348b1f8e4b 100644 --- a/sys/mips/switch.S +++ b/sys/mips/switch.S @@ -82,7 +82,7 @@ ctx_save: # release @from thread spin lock jal spin_unlock - addu a0, s0, TD_LOCK # (delay) 1st arg - @from spin lock + lw a0, TD_LOCK(s0) # (delay) 1st arg - @from spin lock ctx_resume: # update curthread pointer to reference @to thread @@ -98,7 +98,7 @@ ctx_resume: # acquire @to thread spin lock la a1, ctx_resume # 2nd arg - waiting point jal _spin_lock - addu a0, s1, TD_LOCK # (delay) 1st arg - @to spin lock + lw a0, TD_LOCK(s1) # (delay) 1st arg - @to spin lock # Enable interrupts finishing safe interlock on td_spin. jal intr_enable From 2a573d608ab34cd8a2b6197ee4123e6b330365af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 15:58:46 +0200 Subject: [PATCH 04/11] Thread0 starts with scheduler lock as td_lock. --- sys/kern/main.c | 2 +- sys/kern/sched.c | 4 +++- sys/kern/thread.c | 1 - 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/sys/kern/main.c b/sys/kern/main.c index 0cf8bc23f3..35ea4d6d05 100644 --- a/sys/kern/main.c +++ b/sys/kern/main.c @@ -82,8 +82,8 @@ __noreturn void kernel_init(void) { /* Make dispatcher & scheduler structures ready for use. */ init_sleepq(); init_turnstile(); - init_sched(); init_thread0(); + init_sched(); /* With scheduler ready we can create necessary threads. 
*/ init_ithreads(); diff --git a/sys/kern/sched.c b/sys/kern/sched.c index 3d25038855..300067350c 100644 --- a/sys/kern/sched.c +++ b/sys/kern/sched.c @@ -7,16 +7,18 @@ #include #include #include -#include +#include #include #include +static spin_t sched_lock = SPIN_INITIALIZER(0); static runq_t runq; static bool sched_active = false; #define SLICE 10 void init_sched(void) { + thread0.td_lock = &sched_lock; runq_init(&runq); } diff --git a/sys/kern/thread.c b/sys/kern/thread.c index f841d99d1c..41ece64320 100644 --- a/sys/kern/thread.c +++ b/sys/kern/thread.c @@ -33,7 +33,6 @@ static alignas(PAGESIZE) uint8_t _stack0[PAGESIZE]; /* Thread Zero is initially running with interrupts disabled! */ thread_t thread0 = { - .td_lock = &SPIN_INITIALIZER(0), .td_name = "thread0", .td_tid = 0, .td_prio = 255, From c374adc52a7ee60f00f1c2abde1089f3ae945492 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 16:20:56 +0200 Subject: [PATCH 05/11] Add a spin lock for each turnstile chain. --- sys/kern/turnstile.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sys/kern/turnstile.c b/sys/kern/turnstile.c index 44dddc25bb..98b9ca4c74 100644 --- a/sys/kern/turnstile.c +++ b/sys/kern/turnstile.c @@ -52,6 +52,7 @@ typedef struct turnstile { } turnstile_t; typedef struct turnstile_chain { + spin_t tc_lock; ts_list_t tc_turnstiles; } turnstile_chain_t; @@ -68,6 +69,7 @@ static void turnstile_ctor(turnstile_t *ts) { void init_turnstile(void) { for (int i = 0; i < TC_TABLESIZE; i++) { turnstile_chain_t *tc = &turnstile_chains[i]; + tc->tc_lock = SPIN_INITIALIZER(0); LIST_INIT(&tc->tc_turnstiles); } } From b0ca02b7922a10c896a7b05ded87bacaf71ec994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 17:03:10 +0200 Subject: [PATCH 06/11] Remove unnecessary sq_lock. --- sys/kern/sleepq.c | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index d0f2c7d8bb..e0bf467e60 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -49,9 +49,10 @@ static void sc_release(sleepq_chain_t *sc) { spin_unlock(&sc->sc_lock); } -/*! \brief stores all threads sleeping on the same resource */ +/*! \brief stores all threads sleeping on the same resource. + * + * All fields below are protected by corresponding sc_lock. */ typedef struct sleepq { - spin_t sq_lock; TAILQ_ENTRY(sleepq) sq_entry; /*!< link on sleepq_chain */ TAILQ_HEAD(, sleepq) sq_free; /*!< unused sleep queue records */ TAILQ_HEAD(, thread) sq_blocked; /*!< blocked threads */ @@ -59,24 +60,11 @@ typedef struct sleepq { void *sq_wchan; /*!< associated waiting channel */ } sleepq_t; -static void sq_acquire(sleepq_t *sq) { - spin_lock(&sq->sq_lock); -} - -static bool sq_owned(sleepq_t *sq) { - return spin_owned(&sq->sq_lock); -} - -static void sq_release(sleepq_t *sq) { - spin_unlock(&sq->sq_lock); -} - static void sq_ctor(sleepq_t *sq) { TAILQ_INIT(&sq->sq_blocked); TAILQ_INIT(&sq->sq_free); sq->sq_nblocked = 0; sq->sq_wchan = NULL; - sq->sq_lock = SPIN_INITIALIZER(0); } void init_sleepq(void) { @@ -112,10 +100,8 @@ static sleepq_t *sq_lookup(sleepq_chain_t *sc, void *wchan) { sleepq_t *sq; TAILQ_FOREACH (sq, &sc->sc_queues, sq_entry) { - if (sq->sq_wchan == wchan) { - sq_acquire(sq); + if (sq->sq_wchan == wchan) return sq; - } } return NULL; @@ -142,7 +128,6 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, /* No sleep queue for this waiting channel. 
* We take current thread's sleep queue and use it for that purpose. */ sq = td_sq; - sq_acquire(sq); sq->sq_wchan = wchan; TAILQ_INSERT_HEAD(&sc->sc_queues, sq, sq_entry); } else { @@ -169,7 +154,6 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, td->td_flags |= TDF_SLPTIMED; } - sq_release(sq); sc_release(sc); } @@ -178,7 +162,6 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { td->td_waitpt); assert(sc_owned(sc)); - assert(sq_owned(sq)); assert(td->td_wchan != NULL); assert(td->td_sleepqueue == NULL); @@ -308,7 +291,6 @@ bool sleepq_signal(void *wchan) { } sq_wakeup(best_td, sc, sq, SLP_NORMAL); - sq_release(sq); sc_release(sc); sched_maybe_preempt(); @@ -327,7 +309,6 @@ bool sleepq_broadcast(void *wchan) { thread_t *td; TAILQ_FOREACH (td, &sq->sq_blocked, td_sleepq) sq_wakeup(td, sc, sq, SLP_NORMAL); - sq_release(sq); sc_release(sc); sched_maybe_preempt(); @@ -339,10 +320,8 @@ static bool _sleepq_abort(thread_t *td, int reason) { sleepq_t *sq = sq_lookup(sc, td->td_wchan); bool aborted = false; - if (sq != NULL) { + if (sq != NULL) aborted = sq_wakeup(td, sc, sq, reason); - sq_release(sq); - } sc_release(sc); /* If we woke up higher priority thread, we should switch to it immediately. From 41ec260663655c276087ab4a62e80a34f00dc7bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 17:20:51 +0200 Subject: [PATCH 07/11] Simplify _sleepq_wait body. --- sys/kern/sleepq.c | 112 +++++++++++++++++++++++++--------------------- 1 file changed, 60 insertions(+), 52 deletions(-) diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index e0bf467e60..7394228e80 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -109,6 +109,9 @@ static sleepq_t *sq_lookup(sleepq_chain_t *sc, void *wchan) { static void sq_enter(thread_t *td, void *wchan, const void *waitpt, sleep_t sleep) { + sleepq_chain_t *sc = SC_LOOKUP(wchan); + assert(sc_owned(sc)); + klog("Thread %ld goes to sleep on %p at pc=%p", td->td_tid, wchan, waitpt); assert(td->td_wchan == NULL); @@ -121,7 +124,6 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, assert(TAILQ_EMPTY(&td_sq->sq_free)); assert(td_sq->sq_nblocked == 0); - sleepq_chain_t *sc = sc_acquire(wchan); sleepq_t *sq = sq_lookup(sc, wchan); if (sq == NULL) { @@ -153,8 +155,65 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, if (sleep >= SLP_TIMED) td->td_flags |= TDF_SLPTIMED; } +} + +static inline bool _sleepq_interrupted_early(thread_t *td, sleep_t sleep) { + return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR; +} +static int sq_switch(thread_t *td, void *wchan, const void *waitpt, + sleep_t sleep) { + int status = 0; + + WITH_SPIN_LOCK (td->td_lock) { + if (td->td_flags & TDF_SLEEPY) { + td->td_flags &= ~TDF_SLEEPY; + if (_sleepq_interrupted_early(td, sleep)) { + td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); + status = EINTR; + } else { + td->td_state = TDS_SLEEPING; + sched_switch(); + } + } + /* After wakeup, only one of the following flags may be set: + * - TDF_SLPINTR if sleep was aborted, + * - TDF_SLPTIMED if sleep has timed out. */ + if (td->td_flags & TDF_SLPINTR) { + td->td_flags &= ~TDF_SLPINTR; + status = EINTR; + } else if (td->td_flags & TDF_SLPTIMED) { + td->td_flags &= ~TDF_SLPTIMED; + status = ETIMEDOUT; + } + } + + return status; +} + +static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { + thread_t *td = thread_self(); + + /* If there are pending signals, interrupt the sleep immediately. 
*/ + WITH_SPIN_LOCK (td->td_lock) { + if (_sleepq_interrupted_early(td, sleep)) + return EINTR; + } + + if (waitpt == NULL) + waitpt = __caller(0); + + sleepq_chain_t *sc = sc_acquire(wchan); + sq_enter(td, wchan, waitpt, sleep); sc_release(sc); + + /* The code can be interrupted in here. + * A race is avoided by clever use of TDF_SLEEPY flag. + * We can also get a signal in here -- interrupt early if we got one. + * The first signal check is an optimization that saves us the call + * to sq_enter. */ + + return sq_switch(td, wchan, waitpt, sleep); } static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { @@ -192,57 +251,6 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { } } -static inline bool _sleepq_interrupted_early(thread_t *td, sleep_t sleep) { - return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR; -} - -static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { - thread_t *td = thread_self(); - int status = 0; - - /* If there are pending signals, interrupt the sleep immediately. */ - WITH_SPIN_LOCK (td->td_lock) { - if (_sleepq_interrupted_early(td, sleep)) - return EINTR; - } - - if (waitpt == NULL) - waitpt = __caller(0); - - sq_enter(td, wchan, waitpt, sleep); - - /* The code can be interrupted in here. - * A race is avoided by clever use of TDF_SLEEPY flag. - * We can also get a signal in here -- interrupt early if we got one. - * The first signal check is an optimization that saves us the call - * to sq_enter. */ - - WITH_SPIN_LOCK (td->td_lock) { - if (td->td_flags & TDF_SLEEPY) { - td->td_flags &= ~TDF_SLEEPY; - if (_sleepq_interrupted_early(td, sleep)) { - td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); - status = EINTR; - } else { - td->td_state = TDS_SLEEPING; - sched_switch(); - } - } - /* After wakeup, only one of the following flags may be set: - * - TDF_SLPINTR if sleep was aborted, - * - TDF_SLPTIMED if sleep has timed out. */ - if (td->td_flags & TDF_SLPINTR) { - td->td_flags &= ~TDF_SLPINTR; - status = EINTR; - } else if (td->td_flags & TDF_SLPTIMED) { - td->td_flags &= ~TDF_SLPTIMED; - status = ETIMEDOUT; - } - } - - return status; -} - /* Remove a thread from the sleep queue and resume it. */ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, int wakeup) { From 7d36dedd59ba25fd242cf71f5fad19b7cb1addca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Wed, 16 Sep 2020 17:30:41 +0200 Subject: [PATCH 08/11] Simplify sq_wakeup body. Rename some functions. --- sys/kern/sleepq.c | 50 +++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index 7394228e80..3c024139d6 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -27,7 +27,7 @@ typedef struct sleepq_chain { /*! \brief Sleeping mode. * - * Used to select sleeping mode in `_sleepq_wait`. For sleeping purposes given + * Used to select sleeping mode in `sq_wait`. For sleeping purposes given * mode implies former modes, i.e. sleep with timeout (`SLP_TIMED`) is also * interruptible (`SLP_INTR`). 
*/ @@ -157,18 +157,18 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, } } -static inline bool _sleepq_interrupted_early(thread_t *td, sleep_t sleep) { +static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) { return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR; } -static int sq_switch(thread_t *td, void *wchan, const void *waitpt, - sleep_t sleep) { +static int sq_suspend(thread_t *td, void *wchan, const void *waitpt, + sleep_t sleep) { int status = 0; WITH_SPIN_LOCK (td->td_lock) { if (td->td_flags & TDF_SLEEPY) { td->td_flags &= ~TDF_SLEEPY; - if (_sleepq_interrupted_early(td, sleep)) { + if (sq_interrupted_early(td, sleep)) { td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); status = EINTR; } else { @@ -191,12 +191,12 @@ static int sq_switch(thread_t *td, void *wchan, const void *waitpt, return status; } -static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { +static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) { thread_t *td = thread_self(); /* If there are pending signals, interrupt the sleep immediately. */ WITH_SPIN_LOCK (td->td_lock) { - if (_sleepq_interrupted_early(td, sleep)) + if (sq_interrupted_early(td, sleep)) return EINTR; } @@ -213,7 +213,7 @@ static int _sleepq_wait(void *wchan, const void *waitpt, sleep_t sleep) { * The first signal check is an optimization that saves us the call * to sq_enter. */ - return sq_switch(td, wchan, waitpt, sleep); + return sq_suspend(td, wchan, waitpt, sleep); } static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { @@ -251,6 +251,22 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { } } +static void sq_resume(thread_t *td, int wakeup) { + WITH_SPIN_LOCK (td->td_lock) { + /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */ + if (wakeup != EINTR) + td->td_flags &= ~TDF_SLPINTR; + if (wakeup != ETIMEDOUT) + td->td_flags &= ~TDF_SLPTIMED; + /* Do not try to wake up a thread that is sleepy but did not fall asleep! */ + if (td->td_flags & TDF_SLEEPY) { + td->td_flags &= ~TDF_SLEEPY; + } else { + sched_wakeup(td, 0); + } + } +} + /* Remove a thread from the sleep queue and resume it. */ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, int wakeup) { @@ -265,20 +281,8 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, return false; sq_leave(td, sc, sq); + sq_resume(td, wakeup); - WITH_SPIN_LOCK (td->td_lock) { - /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */ - if (wakeup != EINTR) - td->td_flags &= ~TDF_SLPINTR; - if (wakeup != ETIMEDOUT) - td->td_flags &= ~TDF_SLPTIMED; - /* Do not try to wake up a thread that is sleepy but did not fall asleep! */ - if (td->td_flags & TDF_SLEEPY) { - td->td_flags &= ~TDF_SLEEPY; - } else { - sched_wakeup(td, 0); - } - } return true; } @@ -344,7 +348,7 @@ bool sleepq_abort(thread_t *td) { } void sleepq_wait(void *wchan, const void *waitpt) { - int status = _sleepq_wait(wchan, waitpt, 0); + int status = sq_wait(wchan, waitpt, 0); assert(status == 0); } @@ -359,7 +363,7 @@ int sleepq_wait_timed(void *wchan, const void *waitpt, systime_t timeout) { if (timeout > 0) callout_setup_relative(&td->td_slpcallout, timeout, (timeout_t)sq_timeout, thread_self()); - status = _sleepq_wait(wchan, waitpt, timeout ? SLP_TIMED : SLP_INTR); + status = sq_wait(wchan, waitpt, timeout ? 
SLP_TIMED : SLP_INTR); } if (timeout > 0) callout_stop(&td->td_slpcallout); From ba76cc7e6e3286c9a60a38d232e5425165f5c6eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Mon, 5 Oct 2020 17:13:05 +0200 Subject: [PATCH 09/11] Introduce thread_lock_set function. --- include/sys/thread.h | 2 ++ sys/kern/thread.c | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/include/sys/thread.h b/include/sys/thread.h index fd0ea5cdec..1564071e47 100644 --- a/include/sys/thread.h +++ b/include/sys/thread.h @@ -143,6 +143,8 @@ typedef struct thread { thread_t *thread_self(void); +spin_t *thread_lock_set(thread_t *td, spin_t *new); + /*! \brief Initialize first thread in the system. */ void init_thread0(void); diff --git a/sys/kern/thread.c b/sys/kern/thread.c index 41ece64320..ca8d057465 100644 --- a/sys/kern/thread.c +++ b/sys/kern/thread.c @@ -22,6 +22,16 @@ static mtx_t *threads_lock = &MTX_INITIALIZER(0); static thread_list_t all_threads = TAILQ_HEAD_INITIALIZER(all_threads); static thread_list_t zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); +spin_t *thread_lock_set(thread_t *td, spin_t *new) { + spin_t *old; + spin_owned(new); + old = td->td_lock; + spin_owned(old); + td->td_lock = new; + spin_unlock(old); + return old; +} + /* FTTB such a primitive method of creating new TIDs will do. */ static tid_t make_tid(void) { static volatile tid_t tid = 1; From 1dced6b53e40f5e005003d7db352f2e091fd9d01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Mon, 5 Oct 2020 19:45:38 +0200 Subject: [PATCH 10/11] Seems that switching between sched_lock and sc_locks works. --- include/sys/thread.h | 3 +- sys/kern/sched.c | 2 +- sys/kern/signal.c | 24 +++---- sys/kern/sleepq.c | 152 ++++++++++++++++++++----------------------- sys/kern/thread.c | 13 ++-- sys/kern/turnstile.c | 5 +- 6 files changed, 96 insertions(+), 103 deletions(-) diff --git a/include/sys/thread.h b/include/sys/thread.h index ee886faeab..01941eca4d 100644 --- a/include/sys/thread.h +++ b/include/sys/thread.h @@ -62,8 +62,7 @@ typedef enum { #define TDF_SLICEEND 0x00000001 /* run out of time slice */ #define TDF_NEEDSWITCH 0x00000002 /* must switch on next opportunity */ #define TDF_NEEDSIGCHK 0x00000004 /* signals were posted for delivery */ -#define TDF_BORROWING 0x00000010 /* priority propagation */ -#define TDF_SLEEPY 0x00000020 /* thread is about to go to sleep */ +#define TDF_BORROWING 0x00000008 /* priority propagation */ /* TDF_SLP* flags are used internally by sleep queue */ #define TDF_SLPINTR 0x00000040 /* sleep is interruptible */ #define TDF_SLPTIMED 0x00000080 /* sleep with timeout */ diff --git a/sys/kern/sched.c b/sys/kern/sched.c index 33802704eb..e6df749a57 100644 --- a/sys/kern/sched.c +++ b/sys/kern/sched.c @@ -11,7 +11,7 @@ #include #include -static spin_t sched_lock = SPIN_INITIALIZER(0); +spin_t sched_lock = SPIN_INITIALIZER(0); static runq_t runq; static bool sched_active = false; diff --git a/sys/kern/signal.c b/sys/kern/signal.c index 1ac151d059..0431b1fcc4 100644 --- a/sys/kern/signal.c +++ b/sys/kern/signal.c @@ -231,19 +231,19 @@ void sig_kill(proc_t *p, signo_t sig) { if (td_is_stopped(td)) sched_wakeup(td, 0); } else { - WITH_SPIN_LOCK (td->td_lock) { - td->td_flags |= TDF_NEEDSIGCHK; - /* If the thread is sleeping interruptibly (!), wake it up, so that it - * continues execution and the signal gets delivered soon. */ - if (td_is_interruptible(td)) { - /* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? 
*/ - spin_unlock(td->td_lock); - sleepq_abort(td); /* Locks & unlocks td_lock */ - spin_lock(td->td_lock); - } else if (td_is_stopped(td) && continued) { - sched_wakeup(td, 0); - } + spin_lock(td->td_lock); + td->td_flags |= TDF_NEEDSIGCHK; + /* If the thread is sleeping interruptibly (!), wake it up, so that it + * continues execution and the signal gets delivered soon. */ + if (td_is_interruptible(td)) { + /* XXX Maybe TDF_NEEDSIGCHK should be protected by a different lock? */ + spin_unlock(td->td_lock); + sleepq_abort(td); /* Locks & unlocks td_lock */ + return; } + if (td_is_stopped(td) && continued) + sched_wakeup(td, 0); + spin_unlock(td->td_lock); } } diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index 7097b19fbd..fec905b2a4 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -12,6 +12,8 @@ #include #include +extern spin_t sched_lock; + #define SC_TABLESIZE 256 /* Must be power of 2. */ #define SC_MASK (SC_TABLESIZE - 1) #define SC_SHIFT 8 @@ -107,8 +109,8 @@ static sleepq_t *sq_lookup(sleepq_chain_t *sc, void *wchan) { return NULL; } -static void sq_enter(thread_t *td, void *wchan, const void *waitpt, - sleep_t sleep) { +static sleepq_t *sq_enter(thread_t *td, void *wchan, const void *waitpt, + sleep_t sleep) { sleepq_chain_t *sc = SC_LOOKUP(wchan); assert(sc_owned(sc)); @@ -146,75 +148,13 @@ static void sq_enter(thread_t *td, void *wchan, const void *waitpt, td->td_waitpt = waitpt; td->td_sleepqueue = NULL; - /* The thread is about to fall asleep, but it still needs to reach - * sched_switch - it may get interrupted on the way, so mark our intent. */ - td->td_flags |= TDF_SLEEPY; - if (sleep >= SLP_INTR) td->td_flags |= TDF_SLPINTR; if (sleep >= SLP_TIMED) td->td_flags |= TDF_SLPTIMED; } -} - -static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) { - return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR; -} - -static int sq_suspend(thread_t *td, void *wchan, const void *waitpt, - sleep_t sleep) { - int status = 0; - - WITH_SPIN_LOCK (td->td_lock) { - if (td->td_flags & TDF_SLEEPY) { - td->td_flags &= ~TDF_SLEEPY; - if (sq_interrupted_early(td, sleep)) { - td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); - status = EINTR; - } else { - td->td_state = TDS_SLEEPING; - sched_switch(); - spin_lock(td->td_lock); - } - } - /* After wakeup, only one of the following flags may be set: - * - TDF_SLPINTR if sleep was aborted, - * - TDF_SLPTIMED if sleep has timed out. */ - if (td->td_flags & TDF_SLPINTR) { - td->td_flags &= ~TDF_SLPINTR; - status = EINTR; - } else if (td->td_flags & TDF_SLPTIMED) { - td->td_flags &= ~TDF_SLPTIMED; - status = ETIMEDOUT; - } - } - - return status; -} - -static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) { - thread_t *td = thread_self(); - - /* If there are pending signals, interrupt the sleep immediately. */ - WITH_SPIN_LOCK (td->td_lock) { - if (sq_interrupted_early(td, sleep)) - return EINTR; - } - - if (waitpt == NULL) - waitpt = __caller(0); - - sleepq_chain_t *sc = sc_acquire(wchan); - sq_enter(td, wchan, waitpt, sleep); - sc_release(sc); - /* The code can be interrupted in here. - * A race is avoided by clever use of TDF_SLEEPY flag. - * We can also get a signal in here -- interrupt early if we got one. - * The first signal check is an optimization that saves us the call - * to sq_enter. 
*/ - - return sq_suspend(td, wchan, waitpt, sleep); + return sq; } static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { @@ -245,27 +185,69 @@ static void sq_leave(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq) { TAILQ_REMOVE(&sq->sq_free, sq, sq_entry); } - WITH_SPIN_LOCK (td->td_lock) { - td->td_wchan = NULL; - td->td_waitpt = NULL; - td->td_sleepqueue = sq; - } + assert(spin_owned(td->td_lock)); + td->td_wchan = NULL; + td->td_waitpt = NULL; + td->td_sleepqueue = sq; +} + +static inline bool sq_interrupted_early(thread_t *td, sleep_t sleep) { + return (td->td_flags & TDF_NEEDSIGCHK) != 0 && sleep == SLP_INTR; } -static void sq_resume(thread_t *td, int wakeup) { +static int sq_suspend(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, + sleep_t sleep) { + spin_lock(td->td_lock); + + if (td->td_sleepqueue != NULL) { + spin_unlock(td->td_lock); + return 0; + } + + if (sq_interrupted_early(td, sleep)) { + td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); + sq_leave(td, sc, sq); + sc_release(sc); + spin_unlock(td->td_lock); + return EINTR; + } + + thread_lock_set(td, &sc->sc_lock); + td->td_state = TDS_SLEEPING; + sched_switch(); + WITH_SPIN_LOCK (td->td_lock) { - /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. */ - if (wakeup != EINTR) + /* After wakeup, only one of the following flags may be set: + * - TDF_SLPINTR if sleep was aborted, + * - TDF_SLPTIMED if sleep has timed out. */ + if (td->td_flags & TDF_SLPINTR) { td->td_flags &= ~TDF_SLPINTR; - if (wakeup != ETIMEDOUT) + return EINTR; + } + if (td->td_flags & TDF_SLPTIMED) { td->td_flags &= ~TDF_SLPTIMED; - /* Do not try to wake up a thread that is sleepy but did not fall asleep! */ - if (td->td_flags & TDF_SLEEPY) { - td->td_flags &= ~TDF_SLEEPY; - } else { - sched_wakeup(td, 0); + return ETIMEDOUT; } } + + return 0; +} + +static int sq_wait(void *wchan, const void *waitpt, sleep_t sleep) { + thread_t *td = thread_self(); + + /* If there are pending signals, interrupt the sleep immediately. */ + WITH_SPIN_LOCK (td->td_lock) { + if (sq_interrupted_early(td, sleep)) + return EINTR; + } + + if (waitpt == NULL) + waitpt = __caller(0); + + sleepq_chain_t *sc = sc_acquire(wchan); + sleepq_t *sq = sq_enter(td, wchan, waitpt, sleep); + return sq_suspend(td, sc, sq, sleep); } /* Remove a thread from the sleep queue and resume it. */ @@ -281,8 +263,18 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, if ((wakeup == ETIMEDOUT) && !(td->td_flags & TDF_SLPTIMED)) return false; + assert(td->td_lock == &sc->sc_lock); + sq_leave(td, sc, sq); - sq_resume(td, wakeup); + /* Clear TDF_SLPINTR flag if thread's sleep was not aborted. 
*/ + if (wakeup != EINTR) + td->td_flags &= ~TDF_SLPINTR; + if (wakeup != ETIMEDOUT) + td->td_flags &= ~TDF_SLPTIMED; + WITH_SPIN_LOCK(&sched_lock) { + td->td_lock = &sched_lock; + sched_wakeup(td, 0); + } return true; } diff --git a/sys/kern/thread.c b/sys/kern/thread.c index 02e75180a3..6be23bf6b6 100644 --- a/sys/kern/thread.c +++ b/sys/kern/thread.c @@ -22,11 +22,13 @@ static mtx_t *threads_lock = &MTX_INITIALIZER(0); static thread_list_t all_threads = TAILQ_HEAD_INITIALIZER(all_threads); static thread_list_t zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); +extern spin_t sched_lock; + spin_t *thread_lock_set(thread_t *td, spin_t *new) { spin_t *old; - spin_owned(new); + assert(spin_owned(new)); old = td->td_lock; - spin_owned(old); + assert(spin_owned(old)); td->td_lock = new; spin_unlock(old); return old; @@ -93,8 +95,7 @@ thread_t *thread_create(const char *name, void (*fn)(void *), void *arg, td->td_prio = prio; td->td_base_prio = prio; - td->td_lock = kmalloc(M_TEMP, sizeof(spin_t), M_ZERO); - spin_init(td->td_lock, 0); + td->td_lock = &sched_lock; cv_init(&td->td_waitcv, "thread waiters"); LIST_INIT(&td->td_contested); @@ -133,7 +134,6 @@ void thread_delete(thread_t *td) { sleepq_destroy(td->td_sleepqueue); turnstile_destroy(td->td_turnstile); kfree(M_STR, td->td_name); - kfree(M_TEMP, td->td_lock); pool_free(P_THREAD, td); } @@ -169,9 +169,10 @@ __noreturn void thread_exit(void) { TAILQ_INSERT_TAIL(&zombie_threads, td, td_zombieq); } - cv_broadcast(&td->td_waitcv); spin_unlock(td->td_lock); + cv_broadcast(&td->td_waitcv); + spin_lock(td->td_lock); td->td_state = TDS_DEAD; sched_switch(); diff --git a/sys/kern/turnstile.c b/sys/kern/turnstile.c index 2974ef8c4b..1a7a8fe8a6 100644 --- a/sys/kern/turnstile.c +++ b/sys/kern/turnstile.c @@ -131,7 +131,8 @@ static thread_t *acquire_owner(turnstile_t *ts) { assert(ts->ts_state == USED_BLOCKED); thread_t *td = ts->ts_owner; assert(td != NULL); /* Turnstile must have an owner. */ - spin_lock(td->td_lock); + if (!spin_owned(td->td_lock)) + spin_lock(td->td_lock); assert(!td_is_sleeping(td)); /* You must not sleep while holding a mutex. */ return td; } @@ -170,7 +171,7 @@ static void propagate_priority(thread_t *td) { assert(td->td_blocked == NULL); } - spin_unlock(td->td_lock); + // spin_unlock(td->td_lock); } void turnstile_adjust(thread_t *td, prio_t oldprio) { From 2247753db37c061f178080867c82d30bbe96db1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krystian=20Bac=C5=82awski?= Date: Mon, 5 Oct 2020 21:29:53 +0200 Subject: [PATCH 11/11] Fix formatting. --- sys/kern/sleepq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sys/kern/sleepq.c b/sys/kern/sleepq.c index fec905b2a4..f42a57ac40 100644 --- a/sys/kern/sleepq.c +++ b/sys/kern/sleepq.c @@ -207,11 +207,11 @@ static int sq_suspend(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, if (sq_interrupted_early(td, sleep)) { td->td_flags &= ~(TDF_SLPINTR | TDF_SLPTIMED); sq_leave(td, sc, sq); - sc_release(sc); + sc_release(sc); spin_unlock(td->td_lock); return EINTR; } - + thread_lock_set(td, &sc->sc_lock); td->td_state = TDS_SLEEPING; sched_switch(); @@ -271,7 +271,7 @@ static bool sq_wakeup(thread_t *td, sleepq_chain_t *sc, sleepq_t *sq, td->td_flags &= ~TDF_SLPINTR; if (wakeup != ETIMEDOUT) td->td_flags &= ~TDF_SLPTIMED; - WITH_SPIN_LOCK(&sched_lock) { + WITH_SPIN_LOCK (&sched_lock) { td->td_lock = &sched_lock; sched_wakeup(td, 0); }
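
A note on the overall design (an annotation, not part of any patch):
after this series td_lock no longer names a lock embedded in thread_t
but a pointer to whichever spin lock protects the thread at the
moment -- the global sched_lock while it is runnable (PATCH 04/10) and
the sleepqueue chain's sc_lock while it sleeps (PATCH 10). That is
what lets ctx_switch simply dereference TD_LOCK (PATCH 03) and lets
sq_wakeup reclaim a sleeper while holding the chain lock. The scheme
resembles FreeBSD's thread_lock_set. Below is a minimal,
self-contained C sketch of the handoff; the stub spin_t, the lock
names and main are illustrative stand-ins, not kernel code:

/* handoff.c -- illustrative sketch of the td_lock pointer handoff. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct spin {
  bool taken;
  const char *name;
} spin_t;

static void spin_lock(spin_t *s) {
  assert(!s->taken); /* single-threaded sketch, so this suffices */
  s->taken = true;
}

static void spin_unlock(spin_t *s) {
  assert(s->taken);
  s->taken = false;
}

static bool spin_owned(spin_t *s) {
  return s->taken;
}

typedef struct thread {
  spin_t *volatile td_lock; /* lock protecting this thread right now */
} thread_t;

static spin_t sched_lock = {false, "sched_lock"};
static spin_t sc_lock = {false, "sc_lock"}; /* a sleepq chain lock */

/* Hand a thread over to a new lock: both locks must be held on entry,
 * and the old one is released as part of the switch (cf. PATCH 09). */
static spin_t *thread_lock_set(thread_t *td, spin_t *new) {
  assert(spin_owned(new));
  spin_t *old = td->td_lock;
  assert(spin_owned(old));
  td->td_lock = new;
  spin_unlock(old);
  return old;
}

int main(void) {
  thread_t td = {.td_lock = &sched_lock};

  /* Going to sleep (cf. sq_suspend): take the chain lock and the
   * thread's current lock, then migrate the thread under the chain. */
  spin_lock(&sc_lock);
  spin_lock(td.td_lock);
  thread_lock_set(&td, &sc_lock);
  printf("sleeping under %s\n", td.td_lock->name);

  /* Waking up (cf. sq_wakeup): with the chain lock still held, move
   * the thread back under sched_lock and make it runnable. */
  spin_lock(&sched_lock);
  td.td_lock = &sched_lock;
  spin_unlock(&sched_lock);
  spin_unlock(&sc_lock);
  printf("runnable under %s\n", td.td_lock->name);

  return 0;
}

Note the wakeup ordering: sched_lock is acquired while the chain lock
is still held, matching the WITH_SPIN_LOCK (&sched_lock) block nested
under the chain lock in sq_wakeup.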