1  A Supplement on Processes

When process scheduling happens

During interrupt handling (including timer interrupts, I/O interrupts, system calls, and exceptions), the kernel either calls schedule() directly, or calls it on the way back to user mode when the need_resched flag is set;

Kernel threads may call schedule() directly to switch processes, and may also be scheduled during interrupt handling. In other words, kernel threads, as a special kind of process, can be scheduled both actively and passively; a minimal sketch of the active case follows this list;

User-mode processes cannot schedule themselves actively; they can only be scheduled at certain points after trapping into kernel mode, that is, during interrupt handling.
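To make the "active" path concrete, here is a minimal sketch of a kernel module whose thread puts itself to sleep and calls schedule() directly. The module and thread names are illustrative and error handling is trimmed:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_thread;

static int demo_fn(void *unused)
{
	while (!kthread_should_stop()) {
		/* Mark ourselves sleeping, then re-check the stop flag
		 * to avoid racing with kthread_stop(), and give up the
		 * CPU by calling schedule() directly -- active scheduling. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();	/* sleep until someone wakes us */
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_fn, NULL, "sched-demo");
	return IS_ERR(demo_thread) ? PTR_ERR(demo_thread) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_thread);	/* wakes the thread and waits for it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");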

Process switching

To control process execution, the kernel must be able to suspend the process currently running on the CPU and resume some previously suspended process. This is called process switching, task switching, or context switching;

Suspending the process running on the CPU is not the same as saving state at an interrupt: before and after an interrupt we remain in the same process context, merely moving from user-mode to kernel-mode execution;

The process context contains all the information the process needs to execute:

User address space: program code, data, user stack, and so on

Control information: process descriptor, kernel stack, and so on

Hardware context (note that an interrupt also saves the hardware context, just by a different method); the sketch after this list shows where these pieces live.
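These pieces map onto fields of the process descriptor. A heavily abridged sketch, using x86-32 naming; the exact field set varies across kernel versions:

struct thread_struct {			/* per-task hardware context (x86-32) */
	unsigned long sp;		/* saved kernel stack pointer */
	unsigned long ip;		/* saved instruction pointer */
	/* ... segment selectors, debug registers, FPU state ... */
};

struct task_struct {			/* the process descriptor */
	volatile long state;		/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;			/* the kernel stack */
	struct mm_struct *mm;		/* user address space; NULL for kernel threads */
	struct mm_struct *active_mm;	/* mm actually in use (borrowed if mm == NULL) */
	struct thread_struct thread;	/* hardware context saved at switch time */
	/* ... scheduling class and entity, signals, files, credentials ... */
};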

2  Code Analysis

(Unlike the pattern of the previous posts, where we traced with gdb first and then analyzed the code.) In Linux, process scheduling is carried out by the schedule() function. A great deal of material is involved: scheduling policy, time-slice allocation, process context switching, and so on. Here we take only a shallow look at schedule() and the functions around it.

First, the code of schedule():

asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}

Here, sched_submit_work() exists to avoid deadlocks when the process is about to sleep: it flushes any plugged block I/O the task is still holding.

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

__schedule() is the function that does the actual scheduling (and there is more nesting inside it):

static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
		update_rq_clock(rq);

	next = pick_next_task(rq, prev);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch have flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
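Among the nested calls, pick_next_task() is the one that chooses what runs next: it walks the scheduler classes in priority order and takes the first task offered. A simplified sketch of its logic in kernels of this era (the CFS fast path and debug checks are elided):

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

again:
	/* Highest-priority class first: stop, deadline, rt, fair, idle. */
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;
			return p;
		}
	}
	BUG();	/* never reached: the idle class always has a task */
}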

The call to focus on is context_switch(), which performs the actual process context switch.

static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);

	context_tracking_task_switch(prev, next);
	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
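Two things happen here: switch_mm() swaps the user address space (skipped, lazy-TLB style, when the next task is a kernel thread with no mm of its own), and switch_to() swaps the hardware context. The heart of switch_mm() on x86 amounts to reloading CR3; a heavily simplified illustration, with the cpumask and LDT handling elided (hence the _sketch name):

static inline void switch_mm_sketch(struct mm_struct *prev,
				    struct mm_struct *next,
				    struct task_struct *tsk)
{
	if (likely(prev != next)) {
		/* Point CR3 at the next mm's page global directory;
		 * on x86 this implicitly flushes non-global TLB entries. */
		load_cr3(next->pgd);
	}
}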


As for the hardware part of the process context mentioned earlier: the contents of the current process's registers are saved, and the next process's previously saved stack and register contents are loaded. This is what switch_to() implements:

#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP is not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t" /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
		       "memory");					\
} while (0)

As the inline assembly shows, the macro saves the current process's flags, ebp, esp, and eip, and loads the next process's esp and eip; the "popl %%ebp" and "popfl" after label 1 then restore the next process's ebp and flags from its own stack. By the time "pushl %[next_ip]" executes, we are already operating in the realm of the next process.
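The save-my-stack-pointer, load-yours trick is easier to experiment with in user space. Here is a runnable user-space analogue (not kernel code) using the POSIX <ucontext.h> API, where swapcontext() plays the role of switch_to():

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, co_ctx;
static char co_stack[64 * 1024];	/* the coroutine's private stack */

static void coroutine(void)
{
	printf("in coroutine\n");
	swapcontext(&co_ctx, &main_ctx);	/* save self, resume main */
	printf("coroutine resumed\n");		/* continues here later */
}

int main(void)
{
	/* Prepare a context that runs coroutine() on its own stack. */
	getcontext(&co_ctx);
	co_ctx.uc_stack.ss_sp = co_stack;
	co_ctx.uc_stack.ss_size = sizeof(co_stack);
	co_ctx.uc_link = &main_ctx;		/* where to go when it returns */
	makecontext(&co_ctx, coroutine, 0);

	printf("switching to coroutine\n");
	swapcontext(&main_ctx, &co_ctx);	/* save main, load coroutine */
	printf("back in main\n");
	swapcontext(&main_ctx, &co_ctx);	/* resume coroutine at its label */
	printf("main done\n");
	return 0;
}

Just as with switch_to(), each swapcontext() stores the caller's registers and stack pointer and loads the ones saved earlier, so execution resumes exactly where the other context last left off.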

3  Tracing with gdb

Set up the qemu environment:

[screenshot: qemu environment setup]

Set breakpoints:

[screenshot: setting breakpoints in gdb]

It turns out gdb cannot probe context_switch() or switch_to(); only breakpoint 1 takes effect, which the results below confirm:

[screenshot: execution stopping at the schedule() breakpoint]

Execution stops at the first breakpoint. Continuing with "c", the break keeps landing in schedule().

The unresolved problem is that breakpoints cannot be set on context_switch() or switch_to(); readers are welcome to point out the cause. (Most likely it is because context_switch() is declared static inline and switch_to() is a macro, so neither leaves a standalone symbol in the kernel image for gdb to break on.)

4  Summary and Analysis

Like any time-sharing system, Linux achieves the appearance of multiple tasks running in parallel by switching rapidly from one process to another.

schedule() can be invoked by several kernel control paths, either directly or in a lazy (deferred) fashion.

Direct invocation: if the current process cannot obtain a needed resource and must block immediately, it calls the scheduler directly.

Lazy invocation: the TIF_NEED_RESCHED flag of the current process is set to 1 and the scheduler is invoked in a deferred way. Since this flag is always checked before resuming execution of a user-mode process, schedule() is guaranteed to run at some point soon afterwards; a sketch of both halves of this deferred path follows.
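To make the lazy path concrete, here is a hedged sketch of its two halves; the flag helpers are the real kernel ones, but the function bodies are simplified illustrations (hence the _sketch names):

/* Setting half: some path (e.g. the timer tick) decides a reschedule
 * is due and marks the current task. */
static void mark_need_resched_sketch(struct task_struct *curr)
{
	set_tsk_thread_flag(curr, TIF_NEED_RESCHED);
	/* the real resched_curr() also sends an IPI when the task
	 * is running on another CPU */
}

/* Checking half: on the return-to-user path, the entry code does,
 * in effect: */
static void return_to_user_sketch(void)
{
	if (test_thread_flag(TIF_NEED_RESCHED))
		schedule();	/* the deferred invocation happens here */
}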

by:方龙伟

Original work; please credit the source when reposting.

《Linux内核分析》MOOC course: http://mooc.study.163.com/course/USTC-1000029000
