From 14d9a97a7cc5da188bf5ba301c3636365c6d2f81 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Wed, 10 Feb 2021 14:54:21 -0800 Subject: [PATCH] lib/p4wq: Fix race with completed work items Work items can be legally resubmitted from within their own handler. Currently the p4wq detects this case by checking their thread field to see if it's been set to NULL. But that's a race, because if the item was NOT resubmitted then it no longer belongs to the queue and may have been freed or reused or otherwise clobbered legally by user code. Instead, steal a single bit in the thread struct for this purpose. This patch adds a K_CALLBACK_STATE bit in user_options and documents it in such a way (as being intended for "callback manager" utilities) that it can't be used recursively or otherwise collide. Fixes #32052 Signed-off-by: Andy Ross --- include/kernel.h | 11 +++++++++++ lib/os/p4wq.c | 19 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/include/kernel.h b/include/kernel.h index b51a67b466be77..5af88c2eedab4d 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -233,6 +233,17 @@ extern void k_thread_foreach_unlocked( */ #define K_INHERIT_PERMS (BIT(3)) +/** + * @brief Callback item state + * + * @details + * This is a single bit of state reserved for "callback manager" + * utilities (p4wq initially) who need to track operations invoked + * from within a user-provided callback they have invoked. + * Effectively it serves as a tiny bit of zero-overhead TLS data. 
+ */ +#define K_CALLBACK_STATE (BIT(4)) + #ifdef CONFIG_X86 /* x86 Bitmask definitions for threads user options */ diff --git a/lib/os/p4wq.c b/lib/os/p4wq.c index 605535460201c8..ca98a57899706a 100644 --- a/lib/os/p4wq.c +++ b/lib/os/p4wq.c @@ -36,6 +36,21 @@ static bool rb_lessthan(struct rbnode *a, struct rbnode *b) return (uintptr_t)a < (uintptr_t)b; } +static void thread_set_requeued(struct k_thread *th) +{ + th->base.user_options |= K_CALLBACK_STATE; +} + +static void thread_clear_requeued(struct k_thread *th) +{ + th->base.user_options &= ~K_CALLBACK_STATE; +} + +static bool thread_was_requeued(struct k_thread *th) +{ + return !!(th->base.user_options & K_CALLBACK_STATE); +} + /* Slightly different semantics: rb_lessthan must be perfectly * symmetric (to produce a single tree structure) and will use the * pointer value to break ties where priorities are equal, here we @@ -70,6 +85,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) w->thread = _current; sys_dlist_append(&queue->active, &w->dlnode); set_prio(_current, w); + thread_clear_requeued(_current); k_spin_unlock(&queue->lock, k); w->handler(w); @@ -78,7 +94,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) /* Remove from the active list only if it * wasn't resubmitted already */ - if (w->thread == _current) { + if (!thread_was_requeued(_current)) { sys_dlist_remove(&w->dlnode); w->thread = NULL; } @@ -142,6 +158,7 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item) /* Resubmission from within handler? Remove from active list */ if (item->thread == _current) { sys_dlist_remove(&item->dlnode); + thread_set_requeued(_current); item->thread = NULL; } __ASSERT_NO_MSG(item->thread == NULL);