From 8f973bfb5840a97f8b0aa4fda8b2a4c4ede04d03 Mon Sep 17 00:00:00 2001
From: Peter Mitsis
Date: Wed, 9 Oct 2024 15:36:48 -0700
Subject: [PATCH] kernel: Apply 'unlikely' attribute
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Applies the 'unlikely' attribute to various kernel objects that use
z_unpend_first_thread() to optimize for the non-blocking path. This
boosts the thread_metric synchronization benchmark numbers on the
frdm_k64f board by about 10%.

(cherry picked from commit cc415bc1398034bfb5980ce8f526e391f5cd8cc8)

Original-Signed-off-by: Peter Mitsis
GitOrigin-RevId: cc415bc1398034bfb5980ce8f526e391f5cd8cc8
Cr-Build-Id: 8733565196572304993
Cr-Build-Url: https://cr-buildbucket.appspot.com/build/8733565196572304993
Copybot-Job-Name: zephyr-main-copybot-downstream
Change-Id: I275af4be4c682e18201421c2e021d5ead8cbac65
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/zephyr/+/5934669
Reviewed-by: Jeremy Bettis
Reviewed-by: Dawid Niedźwiecki
Commit-Queue: Dawid Niedźwiecki
Tested-by: Dawid Niedźwiecki
---
 kernel/condvar.c  | 2 +-
 kernel/mem_slab.c | 2 +-
 kernel/msg_q.c    | 4 ++--
 kernel/mutex.c    | 2 +-
 kernel/queue.c    | 2 +-
 kernel/sched.c    | 2 +-
 kernel/sem.c      | 4 ++--
 kernel/stack.c    | 2 +-
 8 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/condvar.c b/kernel/condvar.c
index b8d0df95341..615a6b30f15 100644
--- a/kernel/condvar.c
+++ b/kernel/condvar.c
@@ -49,7 +49,7 @@ int z_impl_k_condvar_signal(struct k_condvar *condvar)
 	struct k_thread *thread = z_unpend_first_thread(&condvar->wait_q);
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_condvar, signal, condvar, K_FOREVER);
 		arch_thread_return_value_set(thread, 0);
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index 86aebe38344..80710d063d9 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -275,7 +275,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
 	if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
 			z_thread_return_value_set_with_data(pending_thread, 0, mem);
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 03975041a0b..ebb593e96e0 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -142,7 +142,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
 	if (msgq->used_msgs < msgq->max_msgs) {
 		/* message queue isn't full */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, 0);
 			/* give message to waiting thread */
@@ -242,7 +242,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 		/* handle first thread waiting to write (if any) */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
 			/* add thread's message to queue */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d60b08c4e81..ce76e5a2af5 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -261,7 +261,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 	LOG_DBG("new owner of mutex %p: %p (prio: %d)", mutex, new_owner,
 		new_owner ? new_owner->base.prio : -1000);
-	if (new_owner != NULL) {
+	if (unlikely(new_owner != NULL)) {
 		/*
 		 * new owner is already of higher or equal prio than first
 		 * waiter since the wait queue is priority-based: no need to
diff --git a/kernel/queue.c b/kernel/queue.c
index c46447e40ee..6fcc99eddf7 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -139,7 +139,7 @@ static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 	}
 	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);
 		prepare_thread_to_run(first_pending_thread, data);
diff --git a/kernel/sched.c b/kernel/sched.c
index 518345cb9ea..bbb12476f4b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -725,7 +725,7 @@ struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 	K_SPINLOCK(&_sched_spinlock) {
 		thread = _priq_wait_best(&wait_q->waitq);
-		if (thread != NULL) {
+		if (unlikely(thread != NULL)) {
 			unpend_thread_no_timeout(thread);
 			(void)z_abort_thread_timeout(thread);
 		}
diff --git a/kernel/sem.c b/kernel/sem.c
index f2ae7a6cf78..58d8a86f1e9 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -103,7 +103,7 @@ void z_impl_k_sem_give(struct k_sem *sem)
 	thread = z_unpend_first_thread(&sem->wait_q);
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		arch_thread_return_value_set(thread, 0);
 		z_ready_thread(thread);
 	} else {
@@ -111,7 +111,7 @@ void z_impl_k_sem_give(struct k_sem *sem)
 		resched = handle_poll_events(sem);
 	}
-	if (resched) {
+	if (unlikely(resched)) {
 		z_reschedule(&lock, key);
 	} else {
 		k_spin_unlock(&lock, key);
diff --git a/kernel/stack.c b/kernel/stack.c
index 04b916e3e83..33512a783cc 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -119,7 +119,7 @@ int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
 	first_pending_thread = z_unpend_first_thread(&stack->wait_q);
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		z_thread_return_value_set_with_data(first_pending_thread, 0,
 						    (void *)data);
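
For reviewers unfamiliar with the hint: unlikely() is the usual branch-prediction
annotation built on __builtin_expect. The sketch below shows the common GCC/Clang
idiom and how it maps onto the pattern changed by this patch; the macro definitions
and the toy give() function are illustrative assumptions for this note, not copies
of Zephyr's toolchain headers or kernel code.

    /*
     * sketch.c - illustration only. The likely()/unlikely() definitions follow
     * the common GCC/Clang idiom; they are assumptions here, not Zephyr's
     * actual toolchain headers.
     */
    #include <stdio.h>

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    /* Toy stand-in for the "give" side of a wait queue: waking a pending
     * thread is treated as the rare (slow) path, no waiter as the hot path.
     */
    static int give(int waiter_present)
    {
            if (unlikely(waiter_present)) {
                    return 1;   /* slow path: wake the pending thread */
            }
            return 0;           /* fast path: nothing is waiting */
    }

    int main(void)
    {
            printf("no waiter  -> %d\n", give(0));
            printf("one waiter -> %d\n", give(1));
            return 0;
    }

With optimization enabled, GCC and Clang will typically lay out the annotated
branch off the straight-line path, so the common no-waiter case falls through
without a taken branch; that is the effect behind the thread_metric improvement
cited in the commit message.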