kernel: Add new k_thread_abort()/k_thread_join()
Add a newer, much smaller and simpler implementation of abort and
join.  No need to involve the idle thread.  No need for a special code
path for self-abort.  Joining a thread and waiting for an aborting one
to terminate elsewhere share an implementation.  All work in both
calls happens under a single locked path with no unexpected
synchronization points.

This fixes a bug in the current implementation where the action of
z_thread_single_abort() was not atomic: it released the lock internally
at a point where the thread being aborted could self-abort and confuse
the state such that it failed to abort at all.

Note that the arm32 and native_posix architectures, which have their
own thread abort implementations, now see a much simplified
"z_thread_abort()" internal API.

Signed-off-by: Andy Ross <[email protected]>
Andy Ross authored and nashif committed Feb 24, 2021
1 parent c0c8cb0 commit 6fb6d3c
Showing 8 changed files with 136 additions and 19 deletions.
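
For orientation before the per-file diffs, here is a hypothetical caller-side sketch (not part of this commit) of the semantics the commit message describes: a join waits for the target thread to terminate, and aborting a thread wakes any joiners along the same end-of-thread path. The stack size, priority, and function names below are made up for illustration; only the public k_thread_create()/k_thread_join()/k_thread_abort() APIs are assumed.

/*
 * Hypothetical usage sketch -- NOT part of this commit.  Names, sizes and
 * priorities are arbitrary; only public kernel APIs are used.
 */
#include <kernel.h>
#include <sys/printk.h>

#define WORKER_STACK_SIZE 1024
#define WORKER_PRIO       5

K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker_thread;

static void worker_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Simulate a worker that never exits on its own. */
	while (1) {
		k_sleep(K_MSEC(100));
	}
}

void demo(void)
{
	k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
				      K_THREAD_STACK_SIZEOF(worker_stack),
				      worker_entry, NULL, NULL, NULL,
				      WORKER_PRIO, 0, K_NO_WAIT);

	/* Give the worker a bounded chance to terminate by itself. */
	if (k_thread_join(tid, K_MSEC(200)) != 0) {
		/* Still alive: abort it.  Any thread pending in
		 * k_thread_join() on this target is woken by the same
		 * end-of-thread path the abort uses.
		 */
		k_thread_abort(tid);
	}

	/* The target is now dead, so a second join returns immediately. */
	printk("join after abort: %d\n", k_thread_join(tid, K_NO_WAIT));
}
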
7 changes: 1 addition & 6 deletions arch/arm/core/aarch32/cortex_m/thread_abort.c
@@ -40,13 +40,8 @@ void z_impl_k_thread_abort(k_tid_t thread)
* is not an implicit scheduler invocation.
*/
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
} else {
z_self_abort(); /* Never returns */
}
}

z_thread_single_abort(thread);

/* The abort handler might have altered the ready queue. */
z_reschedule_unlocked();
z_thread_abort(thread);
}
10 changes: 1 addition & 9 deletions arch/posix/core/posix_core.c
@@ -506,17 +506,9 @@ void z_impl_k_thread_abort(k_tid_t thread)
threads_table[thread_idx].thead_cnt,
thread_idx,
__func__);

if (arch_is_in_isr()) {
z_thread_single_abort(thread);
return;
}

z_self_abort();
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

z_thread_single_abort(thread);
z_thread_abort(thread);

if (tstatus->aborted == 0) {
PC_DEBUG("%s aborting now [%i] %i\n",
2 changes: 1 addition & 1 deletion arch/x86/core/x86_mmu.c
@@ -1598,7 +1598,7 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
/* Thread is migrating to another memory domain and not
* exiting for good; we weren't called from
* z_thread_single_abort(). Resetting the stack region will
* z_thread_abort(). Resetting the stack region will
* take place in the forthcoming thread_add() call.
*/
return;
3 changes: 3 additions & 0 deletions include/kernel/thread.h
@@ -204,6 +204,9 @@ struct k_thread {
/** static thread init data */
void *init_data;

/** threads waiting in k_thread_join() */
_wait_q_t join_queue;

#if defined(CONFIG_POLL)
struct z_poller poller;
#endif
2 changes: 1 addition & 1 deletion kernel/include/kernel_internal.h
@@ -151,7 +151,7 @@ bool z_stack_is_user_capable(k_thread_stack_t *stack);
/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_single_abort() */
/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
3 changes: 1 addition & 2 deletions kernel/include/ksched.h
@@ -64,10 +64,9 @@ void z_sched_abort(struct k_thread *thread);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_thread_single_abort(struct k_thread *thread);
FUNC_NORETURN void z_self_abort(void);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);

static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout)
{
127 changes: 127 additions & 0 deletions kernel/sched.c
@@ -52,6 +52,7 @@ struct z_kernel _kernel;
struct k_spinlock sched_spinlock;

static void update_cache(int);
static void end_thread(struct k_thread *thread);

static inline int is_preempt(struct k_thread *thread)
{
@@ -216,6 +217,11 @@ void z_requeue_current(struct k_thread *curr)
}
#endif

static inline bool is_aborting(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_ABORTING) != 0;
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
struct k_thread *thread;
@@ -259,6 +265,10 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
* "ready", it means "is _current already added back to the
* queue such that we don't want to re-add it".
*/
if (is_aborting(_current)) {
end_thread(_current);
}

int queued = z_is_thread_queued(_current);
int active = !z_is_thread_prevented_from_running(_current);

@@ -1427,6 +1437,123 @@ int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)

#endif /* CONFIG_SCHED_CPU_MASK */

static inline void unpend_all(_wait_q_t *wait_q)
{
struct k_thread *thread;

while ((thread = z_waitq_head(wait_q)) != NULL) {
unpend_thread_no_timeout(thread);
(void)z_abort_thread_timeout(thread);
arch_thread_return_value_set(thread, 0);
ready_thread(thread);
}
}

static void end_thread(struct k_thread *thread)
{
/* We hold the lock, and the thread is known not to be running
* anywhere.
*/
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
thread->base.thread_state |= _THREAD_DEAD;
thread->base.thread_state &= ~_THREAD_ABORTING;
if (z_is_thread_queued(thread)) {
dequeue_thread(&_kernel.ready_q.runq, thread);
}
if (thread->base.pended_on != NULL) {
unpend_thread_no_timeout(thread);
}
(void)z_abort_thread_timeout(thread);
unpend_all(&thread->join_queue);
update_cache(1);

sys_trace_thread_abort(thread);
z_thread_monitor_exit(thread);

#ifdef CONFIG_USERSPACE
z_mem_domain_exit_thread(thread);
z_thread_perms_all_clear(thread);
z_object_uninit(thread->stack_obj);
z_object_uninit(thread);
#endif
}
}

void z_thread_abort(struct k_thread *thread)
{
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

if (thread->base.thread_state & _THREAD_DEAD) {
k_spin_unlock(&sched_spinlock, key);
return;
}

#ifdef CONFIG_SMP
if (is_aborting(thread) && thread == _current && arch_is_in_isr()) {
/* Another CPU is spinning for us, don't deadlock */
end_thread(thread);
}

bool active = thread_active_elsewhere(thread);

if (active) {
/* It's running somewhere else, flag and poke */
thread->base.thread_state |= _THREAD_ABORTING;
arch_sched_ipi();
}

if (is_aborting(thread) && thread != _current) {
if (arch_is_in_isr()) {
/* ISRs can only spin waiting another CPU */
k_spin_unlock(&sched_spinlock, key);
while (is_aborting(thread)) {
}
} else if (active) {
/* Threads can join */
add_to_waitq_locked(_current, &thread->join_queue);
z_swap(&sched_spinlock, key);
}
return; /* lock has been released */
}
#endif
end_thread(thread);
if (thread == _current && !arch_is_in_isr()) {
z_swap(&sched_spinlock, key);
__ASSERT(false, "aborted _current back from dead");
}
k_spin_unlock(&sched_spinlock, key);
}

#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(struct k_thread *thread)
{
z_thread_abort(thread);
}
#endif

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
int ret = 0;

if (thread->base.thread_state & _THREAD_DEAD) {
ret = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
} else if (thread == _current ||
thread->base.pended_on == &_current->join_queue) {
ret = -EDEADLK;
} else {
__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
add_to_waitq_locked(_current, &thread->join_queue);
add_thread_timeout(_current, timeout);
return z_swap(&sched_spinlock, key);
}

k_spin_unlock(&sched_spinlock, key);
return ret;
}

#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized. This is because
* the initialization bit does double-duty for thread objects; if false, means
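
As a hedged illustration of the return paths in the k_thread_join() hunk above (again, not code from this commit), a caller might observe the following. "worker" is assumed to be a live thread created elsewhere, and the timed-out wait is expected to report -EAGAIN via the swap return value set by the timeout machinery.

/*
 * Hypothetical snippet -- not from this commit -- exercising the return
 * paths of the k_thread_join() implementation above.
 */
#include <kernel.h>
#include <errno.h>

void join_return_codes(k_tid_t worker)
{
	int ret;

	/* Live target with K_NO_WAIT: hits the -EBUSY branch. */
	ret = k_thread_join(worker, K_NO_WAIT);
	__ASSERT(ret == -EBUSY, "expected -EBUSY, got %d", ret);

	/* Joining yourself (with an actual wait) hits the -EDEADLK branch;
	 * note that K_NO_WAIT is checked first, so a self-join with
	 * K_NO_WAIT would report -EBUSY instead.
	 */
	ret = k_thread_join(k_current_get(), K_FOREVER);
	__ASSERT(ret == -EDEADLK, "expected -EDEADLK, got %d", ret);

	/* Bounded wait: returns 0 once the target dies (its join_queue is
	 * flushed by end_thread()), or a timeout error -- expected to be
	 * -EAGAIN -- if the target outlives the timeout.
	 */
	ret = k_thread_join(worker, K_MSEC(10));
	(void)ret;
}
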
1 change: 1 addition & 0 deletions kernel/thread.c
@@ -552,6 +552,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
/* Any given thread has access to itself */
k_object_access_grant(new_thread, new_thread);
#endif
z_waitq_init(&new_thread->join_queue);

/* Initialize various struct k_thread members */
z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
