kernel: Apply 'unlikely' attribute
Applies the 'unlikely' attribute to various kernel objects that
use z_unpend_first_thread() to optimize for the non-blocking path.

This boosts the thread_metric synchronization benchmark numbers
on the frdm_k64f board by about 10%.

(cherry picked from commit cc415bc)

Original-Signed-off-by: Peter Mitsis <[email protected]>
GitOrigin-RevId: cc415bc
Cr-Build-Id: 8733565196572304993
Cr-Build-Url: https://cr-buildbucket.appspot.com/build/8733565196572304993
Copybot-Job-Name: zephyr-main-copybot-downstream
Change-Id: I275af4be4c682e18201421c2e021d5ead8cbac65
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/zephyr/+/5934669
Reviewed-by: Jeremy Bettis <[email protected]>
Reviewed-by: Dawid Niedźwiecki <[email protected]>
Commit-Queue: Dawid Niedźwiecki <[email protected]>
Tested-by: Dawid Niedźwiecki <[email protected]>
peter-mitsis authored and Chromeos LUCI committed Oct 21, 2024
1 parent d8d40f6 commit 8f973bf
Showing 8 changed files with 10 additions and 10 deletions.
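
For context on why the hint helps: Zephyr's unlikely() is a thin wrapper over the compiler's branch-prediction builtin (__builtin_expect on GCC/Clang), so annotating the "a thread was pending" checks lets the compiler lay out the common no-waiter case as the straight-line, fall-through path. Below is a minimal, self-contained sketch of the pattern; the macro definition and the stand-in names (pop_waiter, queue) are illustrative, not Zephyr's exact headers or APIs.

/* Sketch only: approximately how an unlikely() hint is defined on
 * GCC/Clang-style compilers; see Zephyr's toolchain headers for the
 * real macro. */
#include <stdbool.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect((bool)!!(x), false)

/* Hypothetical stand-in for z_unpend_first_thread(): returns the first
 * pending waiter, or NULL when nobody is blocked. */
static int *pop_waiter(int **queue_head)
{
	int *first = *queue_head;

	*queue_head = NULL;
	return first;
}

int main(void)
{
	int waiter = 42;
	int *queue = &waiter;
	int *w = pop_waiter(&queue);

	/* The hint marks the wake-up branch as cold, so the compiler keeps
	 * the no-waiter case on the fall-through path. */
	if (unlikely(w != NULL)) {
		printf("waking waiter %d\n", *w);
	}
	return 0;
}

The kernel paths touched by this commit have the same shape around z_unpend_first_thread(): most signal/give/put/free calls find an empty wait queue, so that is the branch worth optimizing for.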
2 changes: 1 addition & 1 deletion kernel/condvar.c
@@ -49,7 +49,7 @@ int z_impl_k_condvar_signal(struct k_condvar *condvar)
 
 	struct k_thread *thread = z_unpend_first_thread(&condvar->wait_q);
 
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_condvar, signal, condvar, K_FOREVER);
 
 		arch_thread_return_value_set(thread, 0);
2 changes: 1 addition & 1 deletion kernel/mem_slab.c
@@ -275,7 +275,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
 	if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
 
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
 
 			z_thread_return_value_set_with_data(pending_thread, 0, mem);
4 changes: 2 additions & 2 deletions kernel/msg_q.c
@@ -142,7 +142,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout)
 	if (msgq->used_msgs < msgq->max_msgs) {
 		/* message queue isn't full */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, 0);
 
 			/* give message to waiting thread */
@@ -242,7 +242,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 
 		/* handle first thread waiting to write (if any) */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
 
 			/* add thread's message to queue */
2 changes: 1 addition & 1 deletion kernel/mutex.c
@@ -261,7 +261,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
 		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
 
-	if (new_owner != NULL) {
+	if (unlikely(new_owner != NULL)) {
 		/*
 		 * new owner is already of higher or equal prio than first
 		 * waiter since the wait queue is priority-based: no need to
2 changes: 1 addition & 1 deletion kernel/queue.c
@@ -139,7 +139,7 @@ static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 	}
 	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
 
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);
 
 		prepare_thread_to_run(first_pending_thread, data);
2 changes: 1 addition & 1 deletion kernel/sched.c
@@ -725,7 +725,7 @@ struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 	K_SPINLOCK(&_sched_spinlock) {
 		thread = _priq_wait_best(&wait_q->waitq);
 
-		if (thread != NULL) {
+		if (unlikely(thread != NULL)) {
 			unpend_thread_no_timeout(thread);
 			(void)z_abort_thread_timeout(thread);
 		}
4 changes: 2 additions & 2 deletions kernel/sem.c
@@ -103,15 +103,15 @@ void z_impl_k_sem_give(struct k_sem *sem)
 
 	thread = z_unpend_first_thread(&sem->wait_q);
 
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		arch_thread_return_value_set(thread, 0);
 		z_ready_thread(thread);
 	} else {
 		sem->count += (sem->count != sem->limit) ? 1U : 0U;
 		resched = handle_poll_events(sem);
 	}
 
-	if (resched) {
+	if (unlikely(resched)) {
 		z_reschedule(&lock, key);
 	} else {
 		k_spin_unlock(&lock, key);
2 changes: 1 addition & 1 deletion kernel/stack.c
@@ -119,7 +119,7 @@ int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
 
 	first_pending_thread = z_unpend_first_thread(&stack->wait_q);
 
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		z_thread_return_value_set_with_data(first_pending_thread,
 						    0, (void *)data);
 
