kernel: add public scheduler APIs
The intention of this header is to provide a supported interface
for building IPC objects. Callers can use these functions to
implement IPC without worrying about safety with respect to
sched_spinlock or thread objects unexpectedly changing state.

Previously, implementations of IPC objects were using various
ksched.h APIs which are kernel-private and can present thread
safety issues. This is a formal set of APIs which should always
do the right thing.

Variants using irq locking instead of spinlocks have been
intentionally omitted; anything still doing this should be
considered legacy and unsupported.

A warning is added to ksched.h to convey how perilous it can be
to use the APIs within; even the core kernel does not use them in
a completely safe way, much less any users outside of it, although
there are no known issues on uniprocessor systems.

Signed-off-by: Andrew Boie <[email protected]>
Andrew Boie authored and pabigot committed Dec 20, 2020
1 parent 0b8e6d5 commit fed7fdd
Showing 3 changed files with 227 additions and 0 deletions.
171 changes: 171 additions & 0 deletions include/sys/scheduler.h
@@ -0,0 +1,171 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_SCHEDULER_H
#define ZEPHYR_INCLUDE_SYS_SCHEDULER_H

#include <kernel.h>
#include <sys_clock.h>
#include <wait_q.h>

/*
 * APIs for working with the Zephyr kernel scheduler. Intended for use in
 * the management of IPC objects, either in the core kernel or in IPC
 * objects implemented by OS compatibility layers, providing basic wait/wake
 * operations with spinlocks used for synchronization.
 *
 * These APIs are public and will be treated as a contract, even if the
 * underlying scheduler implementation changes.
 */

/**
 * Callback function prototype for k_sched_callback_wake()
 *
 * If a thread is woken up, the callback function is called with the thread
 * as an argument. The sched_spinlock is held the entire time, ensuring that
 * thread state doesn't suddenly change (from some nested IRQ or activity on
 * another CPU) while the callback is modifying the thread's state.
 *
 * It is only inside these callbacks that it is safe to inspect or modify
 * the thread that was woken up.
 */
typedef void (*k_sched_wake_cb_t)(struct k_thread *thread, void *obj,
				  void *context);

/**
 * Wake up a thread pending on the provided wait queue, with callback
 *
 * Given a wait_q, wake up the highest priority thread on the queue. If the
 * queue was empty, just return false.
 *
 * Otherwise, do the following, in order, holding sched_spinlock the entire
 * time so that the thread state is guaranteed not to change:
 * - If a callback function is provided, run it with the provided object
 *   and context pointers
 * - Set the thread's swap return values to swap_retval and swap_data
 * - Un-pend and ready the thread, but do not invoke the scheduler
 *
 * Repeatedly calling this function until it returns false is a suitable
 * way to wake all threads on the queue.
 *
 * It is up to the caller to implement locking such that the return value of
 * this function (whether a thread was woken up or not) does not immediately
 * become stale. Calls to wait and wake on the same wait_q object must be
 * synchronized; calling this without holding any spinlock is a sign that
 * this API is not being used properly.
 *
 * @param wait_q Wait queue to wake up the highest prio thread
 * @param swap_retval Swap return value for woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @param cb Callback function, or NULL in which case no callback is invoked.
 * @param obj Kernel object associated with this wake operation, passed to cb.
 *            Only used by the callback. May be NULL.
 * @param context Additional context pointer associated with this wake
 *                operation. Only used by the callback. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
bool k_sched_callback_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data,
			   k_sched_wake_cb_t cb, void *obj, void *context);
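
/*
 * Usage sketch (illustrative only, not part of this API): the callback
 * runs while sched_spinlock is still held, so it is the one safe place
 * to inspect or record the woken thread. The caller is assumed to hold
 * the object's own spinlock. 'struct my_sem' and its fields are
 * hypothetical names invented for this example.
 *
 *	static void record_wake_cb(struct k_thread *thread, void *obj,
 *				   void *context)
 *	{
 *		struct my_sem *sem = obj;
 *
 *		sem->last_woken = thread;
 *		ARG_UNUSED(context);
 *	}
 *
 *	static bool my_sem_give_one(struct my_sem *sem)
 *	{
 *		return k_sched_callback_wake(&sem->wait_q, 0, NULL,
 *					     record_wake_cb, sem, NULL);
 *	}
 */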

/**
 * Wake up a thread pending on the provided wait queue
 *
 * Convenience wrapper around k_sched_callback_wake() for the very common
 * case where no callback is required.
 *
 * @param wait_q Wait queue to wake up the highest prio thread
 * @param swap_retval Swap return value for woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
static inline bool k_sched_wake(_wait_q_t *wait_q, int swap_retval,
				void *swap_data)
{
	return k_sched_callback_wake(wait_q, swap_retval, swap_data,
				     NULL, NULL, NULL);
}

/**
 * Wake up all threads pending on the provided wait queue
 *
 * Convenience function that invokes k_sched_wake() on threads in the queue
 * until there are none left to wake up.
 *
 * @param wait_q Wait queue to wake all threads on
 * @param swap_retval Swap return value for each woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If any threads were woken up
 * @retval false If the wait_q was empty
 */
static inline bool k_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool woken = false;

	while (k_sched_wake(wait_q, swap_retval, swap_data)) {
		woken = true;
	}

	/* True if we woke at least one thread up */
	return woken;
}

/**
 * Atomically put the current thread to sleep on a wait queue, with timeout
 *
 * The thread will be added to the provided wait queue. The lock, which
 * should be held by the caller with the provided key, will be released
 * once this is completely done and we have swapped out.
 *
 * The return value and data pointer are set by whoever woke us up via
 * k_sched_wake().
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 * @param wait_q Wait queue to go to sleep on
 * @param timeout Waiting period to be woken up, or K_FOREVER to wait
 *                indefinitely.
 * @param data Storage location for the data pointer set when the thread
 *             was woken up. May be NULL if not used.
 * @return Swap return value set by whoever woke us up, or -EAGAIN if the
 *         timeout expired without the thread being woken up.
 */
int k_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data);
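
/*
 * Usage sketch (illustrative only): the waiting side of a hypothetical
 * 'my_event' object. The caller takes the object's spinlock, checks the
 * state, and if it must block, hands the lock and key to k_sched_wait(),
 * which pends the thread and releases the lock atomically. All names
 * other than the scheduler APIs are invented for this example.
 *
 *	int my_event_wait(struct my_event *evt, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&evt->lock);
 *
 *		if (evt->signaled) {
 *			evt->signaled = false;
 *			k_spin_unlock(&evt->lock, key);
 *			return 0;
 *		}
 *
 *		return k_sched_wait(&evt->lock, key, &evt->wait_q,
 *				    timeout, NULL);
 *	}
 */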

/**
 * Atomically invoke a scheduling decision
 *
 * Waking up a thread with APIs like k_sched_wake() un-pends and readies
 * the woken thread, but does not implicitly invoke the scheduler to
 * immediately schedule it. This allows other tasks to be performed in
 * between.
 *
 * When called, the kernel may context switch to some other thread,
 * releasing the lock once we have switched out. The lock will never
 * be held when this returns.
 *
 * If the caller is a cooperative thread, or the next thread to run on this
 * CPU is otherwise still the current thread, this just unlocks the spinlock
 * and returns.
 *
 * Most users will use the same lock here that synchronizes k_sched_wait()
 * and k_sched_wake() calls.
 *
 * It is safe (albeit very slightly wasteful) to call this even if no thread
 * was woken up.
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 */
void k_sched_invoke(struct k_spinlock *lock, k_spinlock_key_t key);
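
/*
 * Usage sketch (illustrative only): the signaling side of the hypothetical
 * 'my_event' object from the k_sched_wait() example above. If a waiter
 * exists it is woken and the scheduler is then invoked, releasing the lock
 * in the process; if not, the event is latched, and k_sched_invoke()
 * still releases the lock.
 *
 *	void my_event_signal(struct my_event *evt)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&evt->lock);
 *
 *		if (!k_sched_wake(&evt->wait_q, 0, NULL)) {
 *			evt->signaled = true;
 *		}
 *
 *		k_sched_invoke(&evt->lock, key);
 *	}
 */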

#endif /* ZEPHYR_INCLUDE_SYS_SCHEDULER_H */
10 changes: 10 additions & 0 deletions kernel/include/ksched.h
@@ -13,6 +13,16 @@
#include <tracing/tracing.h>
#include <stdbool.h>

/* WARNING WARNING WARNING HERE THERE BE DRAGONS
 *
 * Many of the APIs here are unsafe to call without sched_spinlock held.
 * This is particularly the case for most of the APIs here that take a
 * struct k_thread pointer argument.
 *
 * The contents of this file should be treated as private to the core kernel
 * and may change at any time.
 */

BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);

46 changes: 46 additions & 0 deletions kernel/sched.c
@@ -16,6 +16,7 @@
#include <kernel_internal.h>
#include <logging/log.h>
#include <sys/atomic.h>
#include <sys/scheduler.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Maximum time between the time a self-aborting thread flags itself
@@ -1619,3 +1620,48 @@ static inline void z_vrfy_k_thread_abort(k_tid_t thread)
}
#include <syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */

/*
 * scheduler.h API implementations
 */
bool k_sched_callback_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data,
			   k_sched_wake_cb_t cb, void *obj, void *context)
{
	struct k_thread *thread;
	bool ret = false;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			if (cb != NULL) {
				cb(thread, obj, context);
			}
			z_thread_return_value_set_with_data(thread,
							    swap_retval,
							    swap_data);
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
			ready_thread(thread);
			ret = true;
		}
	}

	return ret;
}

int k_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
{
	int ret = z_pend_curr(lock, key, wait_q, timeout);

	if (data != NULL) {
		*data = _current->base.swap_data;
	}
	return ret;
}

void k_sched_invoke(struct k_spinlock *lock, k_spinlock_key_t key)
{
	z_reschedule(lock, key);
}
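
Taken together, these calls are enough to build a small IPC object. The
sketch below defines the hypothetical 'my_event' object assumed by the
usage comments in scheduler.h above, plus a broadcast operation built on
k_sched_wake_all(); it is illustrative only and not part of this commit,
and wait queue/spinlock initialization is elided.

/* Hypothetical event object assumed by the usage sketches above;
 * illustrative only, not part of this commit.
 */
struct my_event {
	struct k_spinlock lock;
	_wait_q_t wait_q;
	bool signaled;
};

/* Wake every pending waiter with swap_retval 0, then let the scheduler
 * pick the next thread to run; the lock is released by k_sched_invoke().
 */
void my_event_broadcast(struct my_event *evt)
{
	k_spinlock_key_t key = k_spin_lock(&evt->lock);

	(void)k_sched_wake_all(&evt->wait_q, 0, NULL);
	k_sched_invoke(&evt->lock, key);
}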
