2 Commits

Author: Travis Geiselbrecht
SHA1:   0f5c859117
Date:   2013-05-05 19:25:20 -07:00

    [kernel][thread] quick optimization to precheck the scheduler before trying

    Before putting the old thread into the run queue, do a quick check to
    see if there's any potential for reschedule in the yield and preempt case.

Author: Travis Geiselbrecht
SHA1:   5ad38b93cc
Date:   2013-05-05 18:29:41 -07:00

    [arch][arm-m] slight code optimization in the context switch path
5 changed files with 70 additions and 36 deletions
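The first commit (0f5c859117) hinges on a cheap check: with one run queue per priority level and a bitmap whose bit i says whether queue i is non-empty (both already present in kernel/thread.c), a thread about to yield or be preempted can tell in a few instructions whether the scheduler could possibly pick anything else. The sketch below is illustrative only, not the kernel's code, and it assumes exactly 32 priority levels so the highest non-empty queue is simply the highest set bit of the bitmap.

#include <stdbool.h>
#include <stdint.h>

#define NUM_PRIORITIES   32                    /* assumption: exactly 32 levels */
#define HIGHEST_PRIORITY (NUM_PRIORITIES - 1)

static uint32_t run_queue_bitmap;              /* bit i set => run_queue[i] is non-empty */
static int      current_priority;              /* priority of the running thread */

/* index of the highest non-empty run queue; only valid when the bitmap is non-zero */
static int highest_queued_priority(void)
{
    return 31 - __builtin_clz(run_queue_bitmap);
}

/* would entering the scheduler from yield/preempt actually switch threads? */
static bool might_resched(void)
{
    if (run_queue_bitmap == 0)                 /* nothing queued: we're the only runnable thread */
        return false;
    return highest_queued_priority() >= current_priority;
}

/* yield fast path: skip the queue shuffle and the reschedule entirely
 * when no queued thread is at or above our priority */
void yield_sketch(void)
{
    if (!might_resched())
        return;
    /* ...otherwise mark ourselves READY, append to our queue's tail and
     * call the scheduler, as the real thread_yield() below now does... */
}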
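The second commit (5ad38b93cc) collapses every "pop { r4-r11, lr }; bx lr" return sequence into a single "pop { r4-r11, pc }". The saved return address (or EXC_RETURN value) sits in the same stack slot either way, and on ARMv7-M a POP that includes pc performs the branch itself, so one instruction is shaved off each context-switch return path. A standalone sketch of the two equivalent epilogues, assuming a GCC-style ARMv7-M toolchain (the function names here are made up, not from the commit):

/* the two-step epilogue the commit removes */
__attribute__((naked)) void restore_old(void)
{
    __asm__ volatile(
        "pop    { r4-r11, lr };"    /* reload callee-saved regs, return address into lr */
        "bx     lr;"                /* then branch through lr */
    );
}

/* the single-instruction form the commit switches to: the value that would
 * have landed in lr is popped straight into pc, which performs the return
 * (or exception return, if it is an EXC_RETURN value) in the same step */
__attribute__((naked)) void restore_new(void)
{
    __asm__ volatile(
        "pop    { r4-r11, pc };"
    );
}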

View File

@@ -131,7 +131,7 @@ void arm_cm_irq_entry(void)
 void arm_cm_irq_exit(bool reschedule)
 {
-    if (reschedule)
+    if (reschedule && thread_might_resched())
         arm_cm_trigger_preempt();
     dec_critical_section();

View File

@@ -87,7 +87,7 @@ static void pendsv(struct arm_cm_exception_frame_long *frame)
     arch_disable_ints();
     inc_critical_section();
-    ASSERT(critical_section_count == 1);
+    DEBUG_ASSERT(critical_section_count == 1);
 
     LTRACEF("preempting thread %p (%s)\n", current_thread, current_thread->name);
@@ -114,8 +114,7 @@ __NAKED void _pendsv(void)
         "push   { r4-r11, lr };"
         "mov    r0, sp;"
         "bl     %0;"
-        "pop    { r4-r11, lr };"
-        "bx     lr;"
+        "pop    { r4-r11, pc };"
         :: "i" (pendsv)
     );
     __UNREACHABLE;
@@ -130,8 +129,7 @@ __NAKED void _svc(void)
     __asm__ volatile(
         /* load the pointer to the original exception frame we want to restore */
         "mov    sp, r4;"
-        "pop    { r4-r11, lr };"
-        "bx     lr;"
+        "pop    { r4-r11, pc };"
     );
 }
@@ -160,19 +158,18 @@ __NAKED static void _arch_non_preempt_context_switch(vaddr_t *fromsp, vaddr_t to
         "str    sp, [r0];"
         "mov    sp, r1;"
-        "pop    { r4-r11, lr };"
         "clrex;"
-        "bx     lr;"
+        "pop    { r4-r11, pc };"
     );
 }
 
 /* second half of a regular context switch, bounced to from pendsv */
 __NAKED static void _thread_mode_bounce(void)
 {
     __asm__ volatile(
-        "pop    { r4-r11, lr };"
-        "bx     lr;"
+        "clrex;"
+        "pop    { r4-r11, pc };"
     );
     __UNREACHABLE;
 }
 
 /*
@@ -205,9 +202,7 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread)
         __asm__ volatile(
             "mov    sp, %0;"
             "cpsie  i;"
-            "pop    { r4-r11, lr };"
-            "clrex;"
-            "bx     lr;"
+            "pop    { r4-r11, pc };"
             :: "r"(newthread->arch.sp)
         );
         __UNREACHABLE;
@@ -219,7 +214,6 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread)
         frame->pc = (uint32_t)&_thread_mode_bounce;
         frame->psr = (1 << 24); /* thread bit set, IPSR 0 */
         frame->r0 = frame->r1 = frame->r2 = frame->r3 = frame->r12 = frame->lr = 99;
-        LTRACEF("iretting to user space\n");
         //hexdump(frame, sizeof(*frame) + 64);

View File

@@ -121,6 +121,7 @@ void thread_sleep(lk_time_t delay);
 status_t thread_detach(thread_t *t);
 status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout);
 status_t thread_detach_and_resume(thread_t *t);
+bool thread_might_resched(void);
 void dump_thread(thread_t *t);
 void dump_all_threads(void);

View File

@@ -15,6 +15,5 @@ MODULE_SRCS := \
     $(LOCAL_DIR)/thread.c \
     $(LOCAL_DIR)/timer.c \
     $(LOCAL_DIR)/semaphore.c \
-
 include make/module.mk

View File

@@ -65,6 +65,9 @@ int critical_section_count;
 static struct list_node run_queue[NUM_PRIORITIES];
 static uint32_t run_queue_bitmap;
 
+// at the moment, can't deal with more than 32 priority levels
+STATIC_ASSERT(NUM_PRIORITIES <= 32);
+
 /* the bootstrap thread (statically allocated) */
 static thread_t bootstrap_thread;
@@ -367,6 +370,15 @@ static void idle_thread_routine(void)
     arch_idle();
 }
 
+/* use the priority bitmap to decide which run queue to look into if we needed to reschedule */
+static int highest_sched_priority(void)
+{
+    /* undefined to call it with a zeroed bitmap (running on idle thread with no other threads in the queue) */
+    DEBUG_ASSERT(run_queue_bitmap != 0);
+
+    return HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
+}
+
 /**
  * @brief Cause another thread to be executed.
  *
@@ -377,7 +389,7 @@ static void idle_thread_routine(void)
  * This is probably not the function you're looking for. See
  * thread_yield() instead.
  */
-void thread_resched(void)
+static void thread_resched(void)
 {
     thread_t *oldthread;
     thread_t *newthread;
@@ -393,16 +405,12 @@ void thread_resched(void)
     oldthread = current_thread;
 
-    // at the moment, can't deal with more than 32 priority levels
-    ASSERT(NUM_PRIORITIES <= 32);
-
     // should at least find the idle thread
 #if THREAD_CHECKS
     ASSERT(run_queue_bitmap != 0);
 #endif
 
-    int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
-    //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);
+    int next_queue = highest_sched_priority();
 
     newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);
@@ -466,6 +474,18 @@ void thread_resched(void)
     arch_context_switch(oldthread, newthread);
 }
 
+/* if we yielded or preempted would there be a potential for reschedule */
+bool thread_might_resched(void)
+{
+    /* if the bitmap is zero, we're the idle thread and the scheduler
+     * queue is empty, so the answer is no
+     */
+    if (likely(run_queue_bitmap == 0))
+        return false;
+
+    return highest_sched_priority() >= current_thread->priority;
+}
+
 /**
  * @brief Yield the cpu to another thread
  *
@@ -486,11 +506,15 @@ void thread_yield(void)
     THREAD_STATS_INC(yields);
 
-    /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
-    current_thread->state = THREAD_READY;
+    /* give up our quantum */
     current_thread->remaining_quantum = 0;
-    insert_in_run_queue_tail(current_thread);
-    thread_resched();
+
+    if (thread_might_resched()) {
+        /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
+        current_thread->state = THREAD_READY;
+        insert_in_run_queue_tail(current_thread);
+        thread_resched();
+    }
 
     exit_critical_section();
 }
@@ -526,13 +550,15 @@ void thread_preempt(void)
     KEVLOG_THREAD_PREEMPT(current_thread);
 
-    /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
-    current_thread->state = THREAD_READY;
-    if (current_thread->remaining_quantum > 0)
-        insert_in_run_queue_head(current_thread);
-    else
-        insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
-    thread_resched();
+    if (thread_might_resched()) {
+        /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
+        current_thread->state = THREAD_READY;
+        if (current_thread->remaining_quantum > 0)
+            insert_in_run_queue_head(current_thread);
+        else
+            insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
+        thread_resched();
+    }
 
     exit_critical_section();
 }
@@ -679,7 +705,18 @@ void thread_set_priority(int priority)
         priority = LOWEST_PRIORITY;
     if (priority > HIGHEST_PRIORITY)
         priority = HIGHEST_PRIORITY;
 
+    enter_critical_section();
+
     current_thread->priority = priority;
+
+    /* if we've changed priority such that someone else needs to run,
+     * immediately yield the cpu.
+     */
+    if (thread_might_resched()) {
+        thread_yield();
+    }
+
+    exit_critical_section();
 }
 
 /**
@@ -692,12 +729,15 @@ void thread_set_priority(int priority)
 void thread_become_idle(void)
 {
     thread_set_name("idle");
-    thread_set_priority(IDLE_PRIORITY);
     idle_thread = current_thread;
 
-    /* release the implicit boot critical section and yield to the scheduler */
+    /* set our priority to idle, which also kicks the scheduler
+     * if any other threads are queued up.
+     */
+    thread_set_priority(IDLE_PRIORITY);
+
+    /* release the implicit boot critical section */
     exit_critical_section();
-    thread_yield();
 
     idle_thread_routine();
 }