From 49f2b6c503d60f84ef8f4b37c77cee5750eb30f7 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Fri, 30 Oct 2020 00:14:15 -0700 Subject: [PATCH 1/9] kernel: add z_thread_free() To match z_thread_malloc() calls. Does not invoke the scheduler. Signed-off-by: Andrew Boie --- kernel/include/kernel_internal.h | 9 ++++++++- kernel/mempool.c | 22 ++++++++++++++++++++++ kernel/msg_q.c | 2 +- kernel/pipes.c | 2 +- kernel/poll.c | 4 ++-- kernel/queue.c | 2 +- kernel/stack.c | 2 +- kernel/userspace.c | 4 ++-- 8 files changed, 38 insertions(+), 9 deletions(-) diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h index 277cea79f00c3f..5db4ab2e2b7372 100644 --- a/kernel/include/kernel_internal.h +++ b/kernel/include/kernel_internal.h @@ -51,7 +51,7 @@ extern char *z_setup_new_thread(struct k_thread *new_thread, * * Threads may be assigned a resource pool, which will be used to allocate * memory on behalf of certain kernel and driver APIs. Memory reserved - * in this way should be freed with k_free(). + * in this way should be freed with z_thread_free(). * * If called from an ISR, the k_malloc() system heap will be used if it exists. * @@ -61,6 +61,13 @@ extern char *z_setup_new_thread(struct k_thread *new_thread, */ void *z_thread_malloc(size_t size); +/** + * @brief Free memory allocated from a resource pool + * + * @param ptr Memory to free, or NULL (in which case this is a no-op) + */ +void z_thread_free(void *ptr); + /* set and clear essential thread flag */ extern void z_thread_essential_set(void); diff --git a/kernel/mempool.c b/kernel/mempool.c index 304b39f56830c4..2f3e35a44b3532 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -108,3 +108,25 @@ void *z_thread_malloc(size_t size) return ret; } + +void z_thread_free(void *ptr) +{ +#ifdef CONFIG_MEM_POOL_HEAP_BACKEND + /* Basically k_heap free without scheduling hooks, we never set a + * timeout for z_thread_malloc anyway + */ + if (ptr != NULL) { + k_spinlock_key_t key; + struct k_mem_block_id *id; + + /* point to hidden block descriptor at start of block */ + id = (struct k_mem_block_id *)((char *)ptr - + WB_UP(sizeof(struct k_mem_block_id))); + + /* return block to the heap memory pool */ + key = k_spin_lock(&id->heap->lock); + sys_heap_free(&id->heap->heap, id->data); + k_spin_unlock(&id->heap->lock, key); + } +#endif +} diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 7a248f0469a40e..9a199d15bcd9cf 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -106,7 +106,7 @@ int k_msgq_cleanup(struct k_msgq *msgq) } if ((msgq->flags & K_MSGQ_FLAG_ALLOC) != 0) { - k_free(msgq->buffer_start); + z_thread_free(msgq->buffer_start); msgq->flags &= ~K_MSGQ_FLAG_ALLOC; } return 0; diff --git a/kernel/pipes.c b/kernel/pipes.c index 1a7ed10a3a0052..29dde3466d2539 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c @@ -181,7 +181,7 @@ int k_pipe_cleanup(struct k_pipe *pipe) } if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0) { - k_free(pipe->buffer); + z_thread_free(pipe->buffer); pipe->buffer = NULL; pipe->flags &= ~K_PIPE_FLAG_ALLOC; } diff --git a/kernel/poll.c b/kernel/poll.c index e9fba2177909b8..5e6395444f7be4 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -374,11 +374,11 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events, ret = k_poll(events_copy, num_events, timeout); (void)memcpy((void *)events, events_copy, bounds); out_free: - k_free(events_copy); + z_thread_free(events_copy); out: return ret; oops_free: - k_free(events_copy); + z_thread_free(events_copy); Z_OOPS(1); } #include diff --git a/kernel/queue.c
b/kernel/queue.c index e082080bc1cc43..114ad935cbdce2 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -42,7 +42,7 @@ void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free) anode = CONTAINER_OF(node, struct alloc_node, node); ret = anode->data; if (needs_free) { - k_free(anode); + z_thread_free(anode); } } else { /* Data was directly placed in the queue, the first word diff --git a/kernel/stack.c b/kernel/stack.c index 75f15026c15516..067176f3a4d84c 100644 --- a/kernel/stack.c +++ b/kernel/stack.c @@ -87,7 +87,7 @@ int k_stack_cleanup(struct k_stack *stack) } if ((stack->flags & K_STACK_FLAG_ALLOC) != (uint8_t)0) { - k_free(stack->base); + z_thread_free(stack->base); stack->base = NULL; stack->flags &= ~K_STACK_FLAG_ALLOC; } diff --git a/kernel/userspace.c b/kernel/userspace.c index f538f2dcf11926..27abd90fabf47e 100644 --- a/kernel/userspace.c +++ b/kernel/userspace.c @@ -353,7 +353,7 @@ void k_object_free(void *obj) k_spin_unlock(&objfree_lock, key); if (dyn != NULL) { - k_free(dyn); + z_thread_free(dyn); } } @@ -449,7 +449,7 @@ static void unref_check(struct z_object *ko, uintptr_t index) rb_remove(&obj_rb_tree, &dyn->node); sys_dlist_remove(&dyn->obj_list); - k_free(dyn); + z_thread_free(dyn); out: #endif k_spin_unlock(&obj_lock, key); From 9eb594adc1a3811aec31e60fb66039f4c598e9df Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 13:40:28 -0700 Subject: [PATCH 2/9] kernel: add public scheduler APIs The intention of this header is to provide a supported interface for building IPC objects. Callers can use these functions to implement IPC without worrying about safety with respect to sched_spinlock or thread objects unexpectedly changing state. Previously, implementations of IPC objects were using various ksched.h APIs which are kernel-private and can present thread safety issues. This is a formal set of APIs which should always do the right thing. Variants using irq locking instead of spinlocks have been intentionally omitted; anything still doing this should be considered legacy and unsupported. A warning is added to ksched.h to convey how perilous it can be to use the APIs within; we're not even using these in a completely safe way within the core kernel, much less users outside of it, although there are no known issues on uniprocessor systems. Signed-off-by: Andrew Boie --- include/sys/scheduler.h | 171 ++++++++++++++++++++++++++++++++++++++++ kernel/include/ksched.h | 10 +++ kernel/sched.c | 45 +++++++++++ 3 files changed, 226 insertions(+) create mode 100644 include/sys/scheduler.h diff --git a/include/sys/scheduler.h b/include/sys/scheduler.h new file mode 100644 index 00000000000000..e7b88b15964ec3 --- /dev/null +++ b/include/sys/scheduler.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2020 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_SYS_SCHEDULER_H +#define ZEPHYR_INCLUDE_SYS_SCHEDULER_H + +#include +#include +#include + +/* + * APIs for working with the Zephyr kernel scheduler. Intended for use in + * management of IPC objects, either in the core kernel or in IPC objects + * implemented by OS compatibility layers, providing basic wait/wake operations + * with spinlocks used for synchronization. + * + * These APIs are public and will be treated as a contract, even if the + * underlying scheduler implementation changes. + */ + +/** + * Callback function prototype for k_sched_callback_wake() + * + * If a thread is woken up, the callback function is called with the thread + * as an argument.
The sched_spinlock is held the entire time, ensuring that + * thread state doesn't suddenly change (from some nested IRQ or activity on + * another CPU) when the callback is modifying the thread's state. + * + * It is only inside these callbacks that it is safe to inspect or modify + * the thread that was woken up. + */ +typedef void (*k_sched_wake_cb_t)(struct k_thread *thread, void *obj, + void *context); + +/** + * Wake up a thread pending on the provided wait queue, with callback + * + * Given a wait_q, wake up the highest priority thread on the queue. If the + * queue was empty, just return false. + * + * Otherwise, do the following, in order, holding sched_spinlock the entire + * time so that the thread state is guaranteed not to change: + * - if the callback function is provided, run the callback function with + * the provided object and context pointers + * - set the thread's swap return values to swap_retval and swap_data + * - un-pend and ready the thread, but do not invoke the scheduler. + * + * It is up to the caller to implement locking such that the return value of + * this function (whether a thread was woken up or not) does not immediately + * become stale. + * + * Repeatedly calling this function until it returns false is a suitable + * way to wake all threads on the queue. + * + * Calls to wait and wake on the same wait_q object must be synchronized. + * Calling this without holding any spinlock is a sign that this API is not + * being used properly. + * + * @param wait_q Wait queue to wake up the highest prio thread + * @param swap_retval Swap return value for woken thread + * @param swap_data Data return value to supplement swap_retval. May be NULL. + * @param cb Callback function, or NULL in which case no callback is invoked. + * @param obj Kernel object associated with this wake operation, passed to cb. + * Only used by the callback. May be NULL. + * @param context Additional context pointer associated with this wake + * operation. Only used by the callback. May be NULL. + * @retval true If a thread was woken up + * @retval false If the wait_q was empty + */ +bool k_sched_callback_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data, + k_sched_wake_cb_t cb, void *obj, void *context); + +/** + * Wake up a thread pending on the provided wait queue + * + * Convenience wrapper around k_sched_callback_wake(), for the very common + * case where no callback is required. + * + * @param wait_q Wait queue to wake up the highest prio thread + * @param swap_retval Swap return value for woken thread + * @param swap_data Data return value to supplement swap_retval. May be NULL. + * @retval true If a thread was woken up + * @retval false If the wait_q was empty + */ +static inline bool k_sched_wake(_wait_q_t *wait_q, int swap_retval, + void *swap_data) +{ + return k_sched_callback_wake(wait_q, swap_retval, swap_data, + NULL, NULL, NULL); +} + +/** + * Wake up all threads pending on the provided wait queue + * + * Convenience function to invoke k_sched_wake() on all threads in the queue + * until there are no more to wake up. + * + * @param wait_q Wait queue whose threads will all be woken up + * @param swap_retval Swap return value for woken thread + * @param swap_data Data return value to supplement swap_retval. May be NULL.
+ * @retval true If any threads were woken up + * @retval false If the wait_q was empty + */ +static inline bool k_sched_wake_all(_wait_q_t *wait_q, int swap_retval, + void *swap_data) +{ + bool woken = false; + + while (k_sched_wake(wait_q, swap_retval, swap_data)) { + woken = true; + } + + /* True if we woke at least one thread up */ + return woken; +} + +/** + * Atomically put the current thread to sleep on a wait queue, with timeout + * + * The thread will be added to the provided wait queue. The lock, which should + * be held by the caller with the provided key, will be released once this is + * completely done and we have swapped out. + * + * The return value and data pointer are set by whoever woke us up via + * k_sched_wake(). + * + * @param lock Address of spinlock to release when we swap out + * @param key Key to the provided spinlock when it was locked + * @param wait_q Wait queue to go to sleep on + * @param timeout Waiting period to be woken up, or K_FOREVER to wait + * indefinitely. + * @param data Storage location for data pointer set when thread was woken up. + * May be NULL if not used. + * @return Return value set by whatever woke us up, or -EAGAIN if the timeout + * expired without being woken up. + */ +int k_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, + _wait_q_t *wait_q, k_timeout_t timeout, void **data); + +/** + * Atomically invoke a scheduling decision + * + * Waking up a thread with APIs like k_sched_wake() un-pends and readies + * the woken up thread, but does not implicitly invoke the scheduler to + * immediately schedule it. This allows other tasks to be performed in + * between. + * + * When called, the kernel may context switch to some other thread, + * releasing the lock once we have switched out. The lock will never + * be held when this returns. + * + * If the caller is a co-operative thread, or the next thread to run on this + * CPU is otherwise still the current thread, this just unlocks the spinlock + * and returns. + * + * Most users will use the same lock here that synchronizes k_sched_wait() + * and k_sched_wake() calls. + * + * It is safe (albeit very slightly wasteful) to call this even if no thread + * was woken up. + * + * @param lock Address of spinlock to release when we swap out + * @param key Key to the provided spinlock when it was locked + */ +void k_sched_invoke(struct k_spinlock *lock, k_spinlock_key_t key); + +#endif /* ZEPHYR_INCLUDE_SYS_SCHEDULER_H */ diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index c81eac4064f8e4..52ce2a24acbfac 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -13,6 +13,16 @@ #include #include +/* WARNING WARNING WARNING HERE THERE BE DRAGONS + * + * Many of the APIs here are unsafe to call without sched_spinlock held. + * This is particularly the case with most of the APIs here which take a + * struct k_thread pointer argument. + * + * The contents of this file should be treated as private to the core kernel + * and may change at any time.
+ */ + BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO >= K_HIGHEST_APPLICATION_THREAD_PRIO); diff --git a/kernel/sched.c b/kernel/sched.c index 32c79a34bee680..b983cf4b014ef4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -16,6 +16,7 @@ #include #include #include +#include LOG_MODULE_DECLARE(os); /* Maximum time between the time a self-aborting thread flags itself @@ -1614,3 +1615,47 @@ static inline void z_vrfy_k_thread_abort(k_tid_t thread) } #include #endif /* CONFIG_USERSPACE */ + +/* + * scheduler.h API implementations + */ +bool k_sched_callback_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data, + k_sched_wake_cb_t cb, void *obj, void *context) +{ + struct k_thread *thread; + bool ret = false; + + LOCKED(&sched_spinlock) { + thread = _priq_wait_best(&wait_q->waitq); + + if (thread != NULL) { + if (cb != NULL) { + cb(thread, obj, context); + } + z_thread_return_value_set_with_data(thread, + swap_retval, + swap_data); + unpend_thread_no_timeout(thread); + (void)z_abort_thread_timeout(thread); + ready_thread(thread); + ret = true; + } + } + + return ret; +} + +int k_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, + _wait_q_t *wait_q, k_timeout_t timeout, void **data) +{ + int ret = z_pend_curr(lock, key, wait_q, timeout); + if (data != NULL) { + *data = _current->base.swap_data; + } + return ret; +} + +void k_sched_invoke(struct k_spinlock *lock, k_spinlock_key_t key) +{ + z_reschedule(lock, key); +} From 8308be537d882b27fc0dc12d6d2542121c761c7f Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 14:16:59 -0700 Subject: [PATCH 3/9] kernel: sem: use scheduler.h APIs Closes races where thread state could change in between the z_unpend_first_thread() and z_ready_thread() calls on SMP. Signed-off-by: Andrew Boie --- kernel/sem.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kernel/sem.c b/kernel/sem.c index 85c8946614cf79..00ae70519f7540 100644 --- a/kernel/sem.c +++ b/kernel/sem.c @@ -28,6 +28,7 @@ #include #include #include +#include /* We use a system-wide lock to synchronize semaphores, which has * unfortunate performance impact vs. using a per-object lock @@ -107,20 +108,14 @@ static inline void handle_poll_events(struct k_sem *sem) void z_impl_k_sem_give(struct k_sem *sem) { k_spinlock_key_t key = k_spin_lock(&lock); - struct k_thread *thread; sys_trace_semaphore_give(sem); - thread = z_unpend_first_thread(&sem->wait_q); - if (thread != NULL) { - arch_thread_return_value_set(thread, 0); - z_ready_thread(thread); - } else { + if (!k_sched_wake(&sem->wait_q, 0, NULL)) { sem->count += (sem->count != sem->limit) ? 1U : 0U; handle_poll_events(sem); } - - z_reschedule(&lock, key); + k_sched_invoke(&lock, key); sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE); } @@ -156,8 +151,7 @@ int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout) goto out; } - ret = z_pend_curr(&lock, key, &sem->wait_q, timeout); - + ret = k_sched_wait(&lock, key, &sem->wait_q, timeout, NULL); out: sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE); return ret; From eaf3137d6a544b1331f079c7e1ab6eafb8a65b39 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 14:53:03 -0700 Subject: [PATCH 4/9] kernel: mem_slab: use scheduler.h Closes races where 'pending_thread' could unexpectedly change state in between ksched.h calls, which are no longer used. 
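To illustrate the intended usage pattern (the same shape the k_sem conversion above and the k_mem_slab conversion below follow), here is a minimal sketch of a hypothetical IPC object built only on the new scheduler.h calls. All names here (struct my_event, my_event_wait(), my_event_post()) are illustrative and not part of this series; spinlock and wait queue initialization is assumed to happen elsewhere.

#include <kernel.h>
#include <sys/scheduler.h>

struct my_event {
	struct k_spinlock lock;
	_wait_q_t wait_q;
	bool posted;
};

int my_event_wait(struct my_event *evt, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&evt->lock);

	if (evt->posted) {
		evt->posted = false;
		k_spin_unlock(&evt->lock, key);
		return 0;
	}

	/* A real implementation would special-case K_NO_WAIT here, as the
	 * in-tree objects do.
	 *
	 * Atomically pend on the wait queue and release the lock; returns
	 * the waker's swap_retval, or -EAGAIN if the timeout expired.
	 */
	return k_sched_wait(&evt->lock, key, &evt->wait_q, timeout, NULL);
}

void my_event_post(struct my_event *evt)
{
	k_spinlock_key_t key = k_spin_lock(&evt->lock);

	if (k_sched_wake(&evt->wait_q, 0, NULL)) {
		/* A waiter was readied; invoke the scheduler, which also
		 * releases the lock on our behalf.
		 */
		k_sched_invoke(&evt->lock, key);
	} else {
		evt->posted = true;
		k_spin_unlock(&evt->lock, key);
	}
}

Note that the wake side never dereferences the woken thread; any per-thread work belongs in a k_sched_callback_wake() callback, as the k_msgq conversion later in the series shows.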
Signed-off-by: Andrew Boie --- kernel/mem_slab.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c index f070939c1e1bb3..7bd12c55d68e93 100644 --- a/kernel/mem_slab.c +++ b/kernel/mem_slab.c @@ -14,6 +14,7 @@ #include #include #include +#include static struct k_spinlock lock; @@ -118,11 +119,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) result = -ENOMEM; } else { /* wait for a free block or timeout */ - result = z_pend_curr(&lock, key, &slab->wait_q, timeout); - if (result == 0) { - *mem = _current->base.swap_data; - } - return result; + return k_sched_wait(&lock, key, &slab->wait_q, timeout, mem); } k_spin_unlock(&lock, key); @@ -133,12 +130,9 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) void k_mem_slab_free(struct k_mem_slab *slab, void **mem) { k_spinlock_key_t key = k_spin_lock(&lock); - struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q); - if (pending_thread != NULL) { - z_thread_return_value_set_with_data(pending_thread, 0, *mem); - z_ready_thread(pending_thread); - z_reschedule(&lock, key); + if (k_sched_wake(&slab->wait_q, 0, *mem)) { + k_sched_invoke(&lock, key); } else { **(char ***)mem = slab->free_list; slab->free_list = *(char **)mem; From 5aec626e492f47863c90b9551e6943d95406fe77 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 15:06:45 -0700 Subject: [PATCH 5/9] kernel: futex: use scheduler.h Closes races where woken up thread could unexpectedly change state in between ksched.h calls, which are no longer used. Signed-off-by: Andrew Boie --- kernel/futex.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index c52f90d655df4a..6c7657a8f3bfee 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -5,12 +5,10 @@ */ #include -#include #include -#include #include -#include -#include +#include +#include static struct z_futex_data *k_futex_find_data(struct k_futex *futex) { @@ -28,7 +26,6 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all) { k_spinlock_key_t key; unsigned int woken = 0; - struct k_thread *thread; struct z_futex_data *futex_data; futex_data = k_futex_find_data(futex); @@ -37,17 +34,15 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all) } key = k_spin_lock(&futex_data->lock); - do { - thread = z_unpend_first_thread(&futex_data->wait_q); - if (thread) { - z_ready_thread(thread); - arch_thread_return_value_set(thread, 0); + if (k_sched_wake(&futex_data->wait_q, 0, NULL)) { woken++; + } else { + break; } - } while (thread && wake_all); + } while (wake_all); - z_reschedule(&futex_data->lock, key); + k_sched_invoke(&futex_data->lock, key); return woken; } @@ -81,8 +76,8 @@ int z_impl_k_futex_wait(struct k_futex *futex, int expected, return -EAGAIN; } - ret = z_pend_curr(&futex_data->lock, - key, &futex_data->wait_q, timeout); + ret = k_sched_wait(&futex_data->lock, key, &futex_data->wait_q, + timeout, NULL); if (ret == -EAGAIN) { ret = -ETIMEDOUT; } From 6a99ccaec2fbc1345d147a0d08d1c7b98a02a2bb Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 15:50:16 -0700 Subject: [PATCH 6/9] kernel: kheap: use scheduler.h Closes races where woken up thread could unexpectedly change state in between ksched.h calls, which are no longer used. 
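The futex conversion above and the k_heap conversion below use the counted and broadcast forms of the same pattern. A hedged sketch follows; struct my_obj and my_obj_wake() are purely illustrative names, with initialization again assumed elsewhere.

#include <kernel.h>
#include <sys/scheduler.h>

struct my_obj {
	struct k_spinlock lock;
	_wait_q_t wait_q;
};

/* Wake one waiter, or all of them, returning how many were woken */
static unsigned int my_obj_wake(struct my_obj *obj, bool wake_all)
{
	unsigned int woken = 0;
	k_spinlock_key_t key = k_spin_lock(&obj->lock);

	do {
		/* Each successful call un-pends and readies one waiter with
		 * swap_retval 0; the scheduler is not invoked yet.
		 */
		if (!k_sched_wake(&obj->wait_q, 0, NULL)) {
			break;
		}
		woken++;
	} while (wake_all);

	/* Safe to call even if nothing was woken; always releases the lock */
	k_sched_invoke(&obj->lock, key);

	return woken;
}

When the caller does not need the count, k_sched_wake_all() performs the loop internally, which is what the k_heap change below relies on.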
Signed-off-by: Andrew Boie --- kernel/kheap.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/kernel/kheap.c b/kernel/kheap.c index b436e097be565c..bb2591ee6a39f4 100644 --- a/kernel/kheap.c +++ b/kernel/kheap.c @@ -5,9 +5,10 @@ */ #include -#include #include #include +#include +#include void k_heap_init(struct k_heap *h, void *mem, size_t bytes) { @@ -42,8 +43,8 @@ void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout) break; } - (void) z_pend_curr(&h->lock, key, &h->wait_q, - K_TICKS(end - now)); + (void)k_sched_wait(&h->lock, key, &h->wait_q, + K_TICKS(end - now), NULL); key = k_spin_lock(&h->lock); } @@ -57,8 +58,8 @@ void k_heap_free(struct k_heap *h, void *mem) sys_heap_free(&h->heap, mem); - if (z_unpend_all(&h->wait_q) != 0) { - z_reschedule(&h->lock, key); + if (k_sched_wake_all(&h->wait_q, 0, NULL)) { + k_sched_invoke(&h->lock, key); } else { k_spin_unlock(&h->lock, key); } From f136cbece295fe1a8ba920fca8f1f9f4b474f64e Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 15:36:53 -0700 Subject: [PATCH 7/9] kernel: stack: use scheduler.h Closes races where woken up thread could unexpectedly change state in between ksched.h calls, which are no longer used. Signed-off-by: Andrew Boie --- kernel/stack.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/kernel/stack.c b/kernel/stack.c index 067176f3a4d84c..ba79268625cd15 100644 --- a/kernel/stack.c +++ b/kernel/stack.c @@ -12,12 +12,12 @@ #include #include #include -#include #include #include #include #include #include +#include #ifdef CONFIG_OBJECT_TRACING @@ -96,7 +96,6 @@ int k_stack_cleanup(struct k_stack *stack) int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data) { - struct k_thread *first_pending_thread; int ret = 0; k_spinlock_key_t key = k_spin_lock(&stack->lock); @@ -105,14 +104,8 @@ int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data) goto out; } - first_pending_thread = z_unpend_first_thread(&stack->wait_q); - - if (first_pending_thread != NULL) { - z_ready_thread(first_pending_thread); - - z_thread_return_value_set_with_data(first_pending_thread, - 0, (void *)data); - z_reschedule(&stack->lock, key); + if (k_sched_wake(&stack->wait_q, 0, (void *)data)) { + k_sched_invoke(&stack->lock, key); goto end; } else { *(stack->next) = data; @@ -157,13 +150,9 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, return -EBUSY; } - result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout); - if (result == -EAGAIN) { - return -EAGAIN; - } - - *data = (stack_data_t)_current->base.swap_data; - return 0; + result = k_sched_wait(&stack->lock, key, &stack->wait_q, timeout, + (void **)data); + return result; } #ifdef CONFIG_USERSPACE From 1c9716f4b147b6a8e7319d7ef08b7100d3f90021 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Fri, 30 Oct 2020 11:24:48 -0700 Subject: [PATCH 8/9] kernel: msgq: use scheduler.h APIs Closes races where woken up thread could unexpectedly change state in between ksched.h calls, which are no longer used. 
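When the waker has to hand data directly to the woken thread, the callback variant keeps that hand-off under sched_spinlock. Below is a sketch of the shape the k_msgq conversion takes; struct my_mbox, give_to_waiter() and my_mbox_send() are hypothetical, and the waiter is assumed to have stored its receive buffer in _current->base.swap_data before pending, as k_msgq_get() does.

#include <string.h>
#include <errno.h>
#include <kernel.h>
#include <sys/scheduler.h>

struct my_mbox {
	struct k_spinlock lock;
	_wait_q_t wait_q;
	size_t msg_size;
};

/* Runs with sched_spinlock held, so touching 'thread' is safe here */
static void give_to_waiter(struct k_thread *thread, void *obj, void *context)
{
	struct my_mbox *mbox = obj;

	/* The waiter left its receive buffer in swap_data before pending */
	(void)memcpy(thread->base.swap_data, context, mbox->msg_size);
}

int my_mbox_send(struct my_mbox *mbox, const void *msg)
{
	k_spinlock_key_t key = k_spin_lock(&mbox->lock);

	if (k_sched_callback_wake(&mbox->wait_q, 0, NULL,
				  give_to_waiter, mbox, (void *)msg)) {
		/* Message delivered directly to a waiter */
		k_sched_invoke(&mbox->lock, key);
		return 0;
	}

	k_spin_unlock(&mbox->lock, key);
	return -EAGAIN;
}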
Signed-off-by: Andrew Boie --- kernel/msg_q.c | 83 ++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 43 deletions(-) diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 9a199d15bcd9cf..a51e69fa755294 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -112,12 +112,27 @@ int k_msgq_cleanup(struct k_msgq *msgq) return 0; } +static inline void update_msgq_write(struct k_msgq *msgq) +{ + msgq->write_ptr += msgq->msg_size; + if (msgq->write_ptr == msgq->buffer_end) { + msgq->write_ptr = msgq->buffer_start; + } + msgq->used_msgs++; +} + +static void pending_read_cb(struct k_thread *thread, void *obj, void *context) +{ + struct k_msgq *msgq = obj; + + /* give message to waiting thread */ + (void)memcpy(thread->base.swap_data, context, msgq->msg_size); +} int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout) { __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), ""); - struct k_thread *pending_thread; k_spinlock_key_t key; int result; @@ -125,24 +140,15 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout if (msgq->used_msgs < msgq->max_msgs) { /* message queue isn't full */ - pending_thread = z_unpend_first_thread(&msgq->wait_q); - if (pending_thread != NULL) { - /* give message to waiting thread */ - (void)memcpy(pending_thread->base.swap_data, data, - msgq->msg_size); - /* wake up waiting thread */ - arch_thread_return_value_set(pending_thread, 0); - z_ready_thread(pending_thread); - z_reschedule(&msgq->lock, key); + if (k_sched_callback_wake(&msgq->wait_q, 0, NULL, + pending_read_cb, msgq, + (void *)data)) { + k_sched_invoke(&msgq->lock, key); return 0; } else { /* put message in queue */ (void)memcpy(msgq->write_ptr, data, msgq->msg_size); - msgq->write_ptr += msgq->msg_size; - if (msgq->write_ptr == msgq->buffer_end) { - msgq->write_ptr = msgq->buffer_start; - } - msgq->used_msgs++; + update_msgq_write(msgq); } result = 0; } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { @@ -151,7 +157,8 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout } else { /* wait for put message success, failure, or timeout */ _current->base.swap_data = (void *) data; - return z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); + return k_sched_wait(&msgq->lock, key, &msgq->wait_q, timeout, + NULL); } k_spin_unlock(&msgq->lock, key); @@ -189,12 +196,19 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q, #include #endif +static void pending_write_cb(struct k_thread *thread, void *obj, void *context) +{ + struct k_msgq *msgq = obj; + + /* add thread's message to queue */ + (void)memcpy(msgq->write_ptr, thread->base.swap_data, msgq->msg_size); +} + int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) { __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), ""); k_spinlock_key_t key; - struct k_thread *pending_thread; int result; key = k_spin_lock(&msgq->lock); @@ -209,21 +223,10 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) msgq->used_msgs--; /* handle first thread waiting to write (if any) */ - pending_thread = z_unpend_first_thread(&msgq->wait_q); - if (pending_thread != NULL) { - /* add thread's message to queue */ - (void)memcpy(msgq->write_ptr, pending_thread->base.swap_data, - msgq->msg_size); - msgq->write_ptr += msgq->msg_size; - if (msgq->write_ptr == msgq->buffer_end) { - msgq->write_ptr = msgq->buffer_start; 
- } - msgq->used_msgs++; - - /* wake up waiting thread */ - arch_thread_return_value_set(pending_thread, 0); - z_ready_thread(pending_thread); - z_reschedule(&msgq->lock, key); + if (k_sched_callback_wake(&msgq->wait_q, 0, NULL, + pending_write_cb, msgq, NULL)) { + update_msgq_write(msgq); + k_sched_invoke(&msgq->lock, key); return 0; } result = 0; @@ -233,7 +236,8 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) } else { /* wait for get message success or timeout */ _current->base.swap_data = data; - return z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); + return k_sched_wait(&msgq->lock, key, &msgq->wait_q, timeout, + NULL); } k_spin_unlock(&msgq->lock, key); @@ -288,20 +292,13 @@ static inline int z_vrfy_k_msgq_peek(struct k_msgq *q, void *data) void z_impl_k_msgq_purge(struct k_msgq *msgq) { k_spinlock_key_t key; - struct k_thread *pending_thread; key = k_spin_lock(&msgq->lock); - /* wake up any threads that are waiting to write */ - while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) { - arch_thread_return_value_set(pending_thread, -ENOMSG); - z_ready_thread(pending_thread); - } - + k_sched_wake_all(&msgq->wait_q, -ENOMSG, NULL); msgq->used_msgs = 0; msgq->read_ptr = msgq->write_ptr; - - z_reschedule(&msgq->lock, key); + k_sched_invoke(&msgq->lock, key); } #ifdef CONFIG_USERSPACE From 92ce7b226ef7355083b83bf261e7f22aea97d611 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 29 Oct 2020 15:25:42 -0700 Subject: [PATCH 9/9] kernel: queue: use scheduler.h Closes races where woken up thread could unexpectedly change state in between ksched.h calls, which are no longer used. Signed-off-by: Andrew Boie --- kernel/queue.c | 44 +++++++++++--------------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/kernel/queue.c b/kernel/queue.c index 114ad935cbdce2..e386833e7f7901 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -16,11 +16,11 @@ #include #include #include -#include #include #include #include #include +#include struct alloc_node { sys_sfnode_t node; @@ -98,12 +98,6 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue) #include #endif -static void prepare_thread_to_run(struct k_thread *thread, void *data) -{ - z_thread_return_value_set_with_data(thread, 0, data); - z_ready_thread(thread); -} - static inline void handle_poll_events(struct k_queue *queue, uint32_t state) { #ifdef CONFIG_POLL @@ -114,16 +108,10 @@ static inline void handle_poll_events(struct k_queue *queue, uint32_t state) void z_impl_k_queue_cancel_wait(struct k_queue *queue) { k_spinlock_key_t key = k_spin_lock(&queue->lock); - struct k_thread *first_pending_thread; - - first_pending_thread = z_unpend_first_thread(&queue->wait_q); - - if (first_pending_thread != NULL) { - prepare_thread_to_run(first_pending_thread, NULL); - } + k_sched_wake(&queue->wait_q, -EAGAIN, NULL); handle_poll_events(queue, K_POLL_STATE_CANCELLED); - z_reschedule(&queue->lock, key); + k_sched_invoke(&queue->lock, key); } #ifdef CONFIG_USERSPACE @@ -138,17 +126,13 @@ static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue) static int32_t queue_insert(struct k_queue *queue, void *prev, void *data, bool alloc, bool is_append) { - struct k_thread *first_pending_thread; k_spinlock_key_t key = k_spin_lock(&queue->lock); if (is_append) { prev = sys_sflist_peek_tail(&queue->data_q); } - first_pending_thread = z_unpend_first_thread(&queue->wait_q); - - if (first_pending_thread != NULL) { - prepare_thread_to_run(first_pending_thread, data); - 
z_reschedule(&queue->lock, key); + if (k_sched_wake(&queue->wait_q, 0, data)) { + k_sched_invoke(&queue->lock, key); return 0; } @@ -170,7 +154,7 @@ static int32_t queue_insert(struct k_queue *queue, void *prev, void *data, sys_sflist_insert(&queue->data_q, prev, data); handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE); - z_reschedule(&queue->lock, key); + k_sched_invoke(&queue->lock, key); /* redundant? */ return 0; } @@ -228,16 +212,9 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail) } k_spinlock_key_t key = k_spin_lock(&queue->lock); - struct k_thread *thread = NULL; - - if (head != NULL) { - thread = z_unpend_first_thread(&queue->wait_q); - } - while ((head != NULL) && (thread != NULL)) { - prepare_thread_to_run(thread, head); + while (head != NULL && k_sched_wake(&queue->wait_q, 0, head)) { head = *(void **)head; - thread = z_unpend_first_thread(&queue->wait_q); } if (head != NULL) { @@ -245,7 +222,7 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail) } handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE); - z_reschedule(&queue->lock, key); + k_sched_invoke(&queue->lock, key); return 0; } @@ -295,9 +272,10 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout) return NULL; } - int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout); + int ret = k_sched_wait(&queue->lock, key, &queue->wait_q, timeout, + &data); - return (ret != 0) ? NULL : _current->base.swap_data; + return (ret != 0) ? NULL : data; } #ifdef CONFIG_USERSPACE
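Taken together, the series lets a dynamically buffered, kernel-internal object pair z_thread_malloc() with the new z_thread_free() and fail its waiters through scheduler.h on teardown. Below is a rough sketch under those assumptions; struct my_fifo and both functions are hypothetical, and kernel_internal.h / wait_q.h are kernel-private headers assumed to be on the include path (kernel code only).

#include <errno.h>
#include <kernel.h>
#include <kernel_internal.h>	/* z_thread_malloc(), z_thread_free() */
#include <wait_q.h>		/* z_waitq_init(), kernel-private */
#include <sys/scheduler.h>

struct my_fifo {
	struct k_spinlock lock;
	_wait_q_t wait_q;
	void *buffer;
	bool allocated;
};

int my_fifo_init(struct my_fifo *fifo, size_t buf_size)
{
	/* Buffer comes from the calling thread's resource pool */
	fifo->buffer = z_thread_malloc(buf_size);
	if (fifo->buffer == NULL) {
		return -ENOMEM;
	}
	fifo->allocated = true;
	z_waitq_init(&fifo->wait_q);
	return 0;
}

void my_fifo_cleanup(struct my_fifo *fifo)
{
	k_spinlock_key_t key = k_spin_lock(&fifo->lock);

	/* Fail any pending readers, then run the scheduler once; the lock
	 * is released either way.
	 */
	(void)k_sched_wake_all(&fifo->wait_q, -EAGAIN, NULL);
	k_sched_invoke(&fifo->lock, key);

	if (fifo->allocated) {
		/* Pairs with z_thread_malloc(); does not invoke the scheduler */
		z_thread_free(fifo->buffer);
		fifo->buffer = NULL;
		fifo->allocated = false;
	}
}

As with the in-tree cleanup paths, z_thread_free() itself never invokes the scheduler; any rescheduling comes from the explicit k_sched_invoke() call.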