Skip to content

Commit

Permalink
[nrf fromlist] subsys: ipc: align icbmsg to no-multithreading
Browse files Browse the repository at this point in the history
Modify ICBMSG so that it can be used in no-multithreading
applications.

Upstream PR: zephyrproject-rtos/zephyr#78188

Signed-off-by: Magdalena Pastula <[email protected]>
  • Loading branch information
magp-nordic committed Sep 10, 2024
1 parent 83195a3 commit 5c43ff9
Showing 1 changed file with 58 additions and 0 deletions.
58 changes: 58 additions & 0 deletions subsys/ipc/ipc_service/backends/ipc_icbmsg.c
Original file line number Diff line number Diff line change
Expand Up @@ -174,11 +174,13 @@ struct ept_data {
struct backend_data {
const struct icbmsg_config *conf;/* Backend instance config. */
struct icmsg_data_t control_data;/* ICMsg data. */
#ifdef CONFIG_MULTITHREADING
struct k_mutex mutex; /* Mutex to protect: ICMsg send call and
* waiting_bound field.
*/
struct k_work ep_bound_work; /* Work item for bounding processing. */
struct k_sem block_wait_sem; /* Semaphore for waiting for free blocks. */
#endif
struct ept_data ept[NUM_EPT]; /* Array of registered endpoints. */
uint8_t ept_map[NUM_EPT]; /* Array that maps endpoint address to index. */
uint16_t waiting_bound[NUM_EPT];/* The bound messages waiting to be registered. */
Expand Down Expand Up @@ -209,8 +211,10 @@ struct control_message {

BUILD_ASSERT(NUM_EPT <= EPT_ADDR_INVALID, "Too many endpoints");

#ifdef CONFIG_MULTITHREADING
/* Work queue for bounding processing. */
static struct k_work_q ep_bound_work_q;
#endif

/**
* Calculate pointer to block from its index and channel configuration (RX or TX).
Expand Down Expand Up @@ -327,12 +331,15 @@ static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size,
size_t total_size = *size + BLOCK_HEADER_SIZE;
size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
struct block_content *block;
#ifdef CONFIG_MULTITHREADING
bool sem_taken = false;
#endif
size_t tx_block_index;
size_t next_bit;
int prev_bit_val;
int r;

#ifdef CONFIG_MULTITHREADING
do {
/* Try to allocate specified number of blocks. */
r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks,
Expand All @@ -358,6 +365,11 @@ static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size,
if (sem_taken) {
k_sem_give(&dev_data->block_wait_sem);
}
#else
/* Try to allocate specified number of blocks. */
r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks,
&tx_block_index);
#endif

if (r < 0) {
if (r != -ENOSPC && r != -EAGAIN) {
Expand Down Expand Up @@ -457,8 +469,10 @@ static int release_tx_blocks(struct backend_data *dev_data, size_t tx_block_inde
return r;
}

#ifdef CONFIG_MULTITHREADING
/* Wake up all waiting threads. */
k_sem_give(&dev_data->block_wait_sem);
#endif
}

return tx_block_index;
Expand Down Expand Up @@ -506,10 +520,14 @@ static int send_control_message(struct backend_data *dev_data, enum msg_type msg
};
int r;

#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
r = icmsg_send(&conf->control_config, &dev_data->control_data, &message,
sizeof(message));
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
if (r < sizeof(message)) {
LOG_ERR("Cannot send over ICMsg, err %d", r);
}
Expand Down Expand Up @@ -685,21 +703,29 @@ static int send_bound_message(struct backend_data *dev_data, struct ept_data *ep
return r;
}

#ifdef CONFIG_MULTITHREADING
/**
 * Defer endpoint binding processing for this backend instance by
 * submitting its work item to the dedicated binding work queue.
 */
static void schedule_ept_bound_process(struct backend_data *dev_data)
{
	struct k_work *bound_work = &dev_data->ep_bound_work;

	k_work_submit_to_queue(&ep_bound_work_q, bound_work);
}
#endif

/**
* Work handler that is responsible to start bounding when ICMsg is bound.
*/
#ifdef CONFIG_MULTITHREADING
static void ept_bound_process(struct k_work *item)
#else
static void ept_bound_process(struct backend_data *dev_data)
#endif
{
#ifdef CONFIG_MULTITHREADING
struct backend_data *dev_data = CONTAINER_OF(item, struct backend_data,
ep_bound_work);
#endif
struct ept_data *ept = NULL;
size_t i;
int r = 0;
Expand All @@ -726,13 +752,19 @@ static void ept_bound_process(struct k_work *item)
}
} else {
/* Walk over all waiting bound messages and match to local endpoints. */
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
for (i = 0; i < NUM_EPT; i++) {
if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) {
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
r = match_bound_msg(dev_data,
dev_data->waiting_bound[i], i);
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
if (r != 0) {
dev_data->waiting_bound[i] =
WAITING_BOUND_MSG_EMPTY;
Expand All @@ -742,7 +774,9 @@ static void ept_bound_process(struct k_work *item)
}
}
}
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif
}

/* Check if any endpoint is ready to rebound and call the callback if it is. */
Expand Down Expand Up @@ -877,12 +911,20 @@ static int received_bound(struct backend_data *dev_data, size_t rx_block_index,
}

/* Put message to waiting array. */
#ifdef CONFIG_MULTITHREADING
k_mutex_lock(&dev_data->mutex, K_FOREVER);
#endif
dev_data->waiting_bound[ept_addr] = rx_block_index;
#ifdef CONFIG_MULTITHREADING
k_mutex_unlock(&dev_data->mutex);
#endif

#ifdef CONFIG_MULTITHREADING
/* Schedule processing the message. */
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif

return 0;
}
Expand Down Expand Up @@ -958,7 +1000,11 @@ static void control_bound(void *priv)

/* Set flag that ICMsg is bounded and now, endpoint bounding may start. */
atomic_or(&dev_data->flags, CONTROL_BOUNDED);
#ifdef CONFIG_MULTITHREADING
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif
}

/**
Expand Down Expand Up @@ -1045,7 +1091,11 @@ static int register_ept(const struct device *instance, void **token,
if (!matching_state) {
return -EINVAL;
}
#ifdef CONFIG_MULTITHREADING
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif
return 0;
}
}
Expand All @@ -1070,8 +1120,12 @@ static int register_ept(const struct device *instance, void **token,
/* Keep endpoint address in token. */
*token = ept;

#ifdef CONFIG_MULTITHREADING
/* Rest of the bounding will be done in the system workqueue. */
schedule_ept_bound_process(dev_data);
#else
ept_bound_process(dev_data);
#endif

return r;
}
Expand Down Expand Up @@ -1187,6 +1241,7 @@ static int backend_init(const struct device *instance)
{
const struct icbmsg_config *conf = instance->config;
struct backend_data *dev_data = instance->data;
#ifdef CONFIG_MULTITHREADING
static K_THREAD_STACK_DEFINE(ep_bound_work_q_stack, EP_BOUND_WORK_Q_STACK_SIZE);
static bool is_work_q_started;

Expand All @@ -1198,12 +1253,15 @@ static int backend_init(const struct device *instance)

is_work_q_started = true;
}
#endif

dev_data->conf = conf;
dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr);
#ifdef CONFIG_MULTITHREADING
k_mutex_init(&dev_data->mutex);
k_work_init(&dev_data->ep_bound_work, ept_bound_process);
k_sem_init(&dev_data->block_wait_sem, 0, 1);
#endif
memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound));
memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map));
return 0;
Expand Down

0 comments on commit 5c43ff9

Please sign in to comment.