
Linux kernel scheduling management: the idle process framework

The swapper process, i.e. the idle process, spends its life in cpu_idle_loop:
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
        int cpu = smp_processor_id();

        while (1) {
                /*
                 * If the arch has a polling bit, we maintain an invariant:
                 *
                 * Our polling bit is clear if we're not scheduled (i.e. if
                 * rq->curr != rq->idle).  This means that, if rq->idle has
                 * the polling bit set, then setting need_resched is
                 * guaranteed to cause the cpu to reschedule.
                 */

                __current_set_polling();
                quiet_vmstat();
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(cpu)) {
                                cpuhp_report_idle_dead();
                                arch_cpu_idle_dead();
                        }

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we reenable interrupts and spin.
                         *
                         * Also if we detected in the wakeup from idle
                         * path that the tick broadcast device expired
                         * for us, we don't want to go deep idle as we
                         * know that the IPI is going to arrive right
                         * away
                         */
                        /* The branch below calls the architecture-related sleep code: */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired())
                                cpu_idle_poll();
                        else
                                cpuidle_idle_call();

                        arch_cpu_idle_exit();
                }

                /*
                 * Since we fell out of the loop above, we know
                 * TIF_NEED_RESCHED must be set, propagate it into
                 * PREEMPT_NEED_RESCHED.
                 *
                 * This is required because for polling idle loops we will
                 * not have had an IPI to fold the state for us.
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
                __current_clr_polling();

                /*
                 * We promise to call sched_ttwu_pending and reschedule
                 * if need_resched is set while polling is set.  That
                 * means that clearing polling needs to be visible
                 * before doing these things.
                 */
                smp_mb__after_atomic();

                sched_ttwu_pending();
                schedule_preempt_disabled();
        }
}
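Inside the inner while (!need_resched()) loop, the annotated branch picks one of two ways of idling: if cpu_idle_force_poll is set, or the tick broadcast device has already expired, the CPU just spins with interrupts enabled in cpu_idle_poll(); otherwise cpuidle_idle_call() hands control to the cpuidle framework, which selects and enters a platform low-power state. For reference, the polling path in kernels of roughly this vintage looks approximately like the sketch below (tracepoints and some bookkeeping omitted, details vary between versions):

static int cpu_idle_poll(void)
{
        rcu_idle_enter();
        local_irq_enable();
        stop_critical_timings();

        /* Spin until a reschedule is needed or polling is no longer wanted. */
        while (!tif_need_resched() &&
               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();

        start_critical_timings();
        rcu_idle_exit();

        return 1;
}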
After boot, the kernel enters idle via the following path:
start_kernel --> rest_init --> cpu_startup_entry
void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non boot cpus!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we wont ever return from this function (so the invalid
         * canaries already on the stack wont ever trigger).
         */
        boot_init_stack_canary();
#endif
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);
        cpu_idle_loop();
}
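On the boot CPU, cpu_startup_entry(CPUHP_ONLINE) is reached at the very end of rest_init(): the original boot task (PID 0) first spawns kernel_init (the future PID 1) and kthreadd, then turns itself into the idle task. Secondary CPUs reach cpu_startup_entry() from their architecture-specific bring-up code (for example secondary_start_kernel() on ARM), so every online CPU ultimately parks in cpu_idle_loop(). Abridged, and with locking and error handling omitted, the tail of rest_init() in kernels of this era reads roughly as follows:

static noinline void __ref rest_init(void)
{
        /* Spawn PID 1; kernel_init() later execs the userspace init. */
        kernel_thread(kernel_init, NULL, CLONE_FS);

        /* Spawn kthreadd, the parent of all other kernel threads. */
        kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);

        /* ... further setup elided ... */

        /*
         * The boot idle thread must execute schedule()
         * at least once to get things moving:
         */
        schedule_preempt_disabled();

        /* Call into cpu_idle with preempt disabled */
        cpu_startup_entry(CPUHP_ONLINE);
}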