Linux Core Kernel Commentary




Kernel/sched.c - part 6


26767     if (!c) {
26768       struct task_struct *p;
26769       read_lock(&tasklist_lock);
26770       for_each_task(p)
26771         p->counter = (p->counter >> 1) + p->priority;
26772       read_unlock(&tasklist_lock);
26773     }
26774   }
26775
26776   /* maintain the per-process 'average timeslice' value.
26777    * (this has to be recalculated even if we reschedule
26778    * to the same process) Currently this is only used on
26779    * SMP: */
26780 #ifdef __SMP__
26781   {
26782     cycles_t t, this_slice;
26783
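The recalculation loop at lines 26767-26772 runs only when the best goodness found was zero, i.e. every runnable process has exhausted its timeslice. Each task's counter is then halved and its priority added back, so a task that just used up its quantum starts over at priority, while a task that keeps sleeping through recalculations creeps up toward roughly twice its priority and therefore wins the next goodness() comparison more easily when it wakes. The standalone program below is only a sketch that iterates the formula from line 26771; the priority value 20 is an illustrative figure, not a claim about the kernel's defaults.

#include <stdio.h>

int main(void)
{
        int priority = 20;   /* illustrative value for p->priority */
        int counter = 0;     /* a task that drained its quantum, then slept */
        int i;

        for (i = 1; i <= 8; i++) {
                /* the recharge formula from line 26771 */
                counter = (counter >> 1) + priority;
                printf("recalculation %d: counter = %d\n", i, counter);
        }
        return 0;   /* counter settles at 39, just under 2*priority */
}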

26784     t = get_cycles();
26785     this_slice = t - sched_data->last_schedule;
26786     sched_data->last_schedule = t;
26787
26788     /* Simple, exponentially fading average calculation:
26789      */
26790     prev->avg_slice = this_slice + prev->avg_slice;
26791     prev->avg_slice >>= 1;
26792   }
26793
26794   /* We drop the scheduler lock early (it's a global
26795    * spinlock), thus we have to lock the previous process
26796    * from getting rescheduled during switch_to(). */
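Lines 26790-26791 are the "exponentially fading average" the comment promises: on every pass the old average is halved before the newly measured slice is added, so a slice measured k reschedules ago contributes with weight 1/2^(k+1) and old history decays geometrically. As the comment at lines 26776-26779 notes, the value is currently consulted only by the SMP code. A small sketch of the arithmetic, with made-up cycle counts:

#include <stdio.h>

int main(void)
{
        unsigned long long avg_slice = 0;
        unsigned long long slice[] = { 80000, 120000, 500, 600, 90000 };
        int i;

        for (i = 0; i < 5; i++) {
                /* lines 26790-26791: add the new sample, then halve */
                avg_slice = slice[i] + avg_slice;
                avg_slice >>= 1;
                printf("slice %llu -> avg_slice %llu\n", slice[i], avg_slice);
        }
        return 0;
}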

26797   next->processor = this_cpu;
26798   next->has_cpu = 1;
26799   spin_unlock(&scheduler_lock);
26800 #endif /* __SMP__ */
26801   if (prev != next) {
26802 #ifdef __SMP__
26803     sched_data->prev = prev;
26804 #endif
26805     kstat.context_swtch++;
26806     get_mmu_context(next);
26807     switch_to(prev,next);
26808
26809     __schedule_tail();
26810   }
26811
26812   reacquire_kernel_lock(current);
26813   return;
26814
26815 scheduling_in_interrupt:
26816   printk("Scheduling in interrupt\n");
26817   *(int *)0 = 0;
26818 }
26819
26820 rwlock_t waitqueue_lock = RW_LOCK_UNLOCKED;
26821
26822 /* wake_up doesn't wake up stopped processes - they have
26823  * to be awakened with signals or similar.
26824  *
26825  * Note that we only need a read lock for the wait queue
26826  * (and thus do not have to protect against interrupts),
26827  * as the actual removal from the queue is handled by the
26828  * process itself. */
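The comment at lines 26822-26828 states the contract of the wake_up family, and __wake_up() itself follows at line 26829. For context, the sketch below shows roughly how driver code of this era used these primitives. It assumes the 2.2-era wait queue API; the identifiers my_queue, my_data_ready, my_wait_for_data and my_irq_body are invented for the example, and a real interrupt handler takes the usual (irq, dev_id, regs) arguments.

#include <linux/sched.h>
#include <linux/wait.h>

static struct wait_queue *my_queue = NULL;   /* empty wait queue */
static volatile int my_data_ready = 0;

/* process context: sleep until the interrupt side reports data */
static void my_wait_for_data(void)
{
        while (!my_data_ready)
                interruptible_sleep_on(&my_queue);  /* TASK_INTERRUPTIBLE */
        my_data_ready = 0;
}

/* interrupt context: wake_up_interruptible() reaches __wake_up() with
 * mode == TASK_INTERRUPTIBLE, so only tasks whose state matches the
 * test at line 26841 are handed to wake_up_process() */
static void my_irq_body(void)
{
        my_data_ready = 1;
        wake_up_interruptible(&my_queue);
}

The window between testing my_data_ready and actually going to sleep is the well-known weakness of the sleep_on() interface; it is ignored here to keep the sketch short.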

26829 void __wake_up(struct wait_queue **q, unsigned int mode)
26830 {
26831   struct wait_queue *next;
26832
26833   read_lock(&waitqueue_lock);
26834   if (q && (next = *q)) {
26835     struct wait_queue *head;
26836
26837     head = WAIT_QUEUE_HEAD(q);
26838     while (next != head) {
26839       struct task_struct *p = next->task;
26840       next = next->next;
26841       if (p->state & mode)
26842         wake_up_process(p);
26843     }
26844   }
26845   read_unlock(&waitqueue_lock);
26846 }
26847
26848 /* Semaphores are implemented using a two-way counter:
26849  * The "count" variable is decremented for each process
26850  * that tries to sleep, while the "waking" variable is
26851  * incremented when the "up()" code goes to wake up
26852  * waiting processes.
26853  *
26854  * Notably, the inline "up()" and "down()" functions can
26855  * efficiently test if they need to do any extra work (up
26856  * needs to do something only if count was negative
26857  * before the increment operation.
26858  *
26859  * waking_non_zero() (from asm/semaphore.h) must execute
26860  * atomically.
26861  *
26862  * When __up() is called, the count was negative before
26863  * incrementing it, and we need to wake up somebody.
26864  *
26865  * This routine adds one to the count of processes that
26866  * need to wake up and exit. ALL waiting processes
26867  * actually wake up but only the one that gets to the
26868  * "waking" field first will gate through and acquire the
26869  * semaphore. The others will go back to sleep.
26870  *
26871  * Note that these functions are only called when there
26872  * is contention on the lock, and as such all this is the
26873  * "non-critical" part of the whole semaphore
26874  * business. The critical part is the inline stuff in
26875  * <asm/semaphore.h> where we want to avoid any extra
26876  * jumps and calls. */
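The comment block above introduces the __up() routine that continues in the next part of the listing. As a rough model of the two-counter idea only, here is a minimal userspace sketch, assuming nothing but POSIX threads: a mutex and condition variable stand in for the kernel's atomic operations and wait queue, and the names two_counter_sem, tcs_init, tcs_down and tcs_up are invented for the illustration.

#include <pthread.h>

struct two_counter_sem {
        int count;              /* decremented by each would-be sleeper */
        int waking;             /* wake-up credits granted by up() */
        pthread_mutex_t lock;   /* stands in for the atomic ops */
        pthread_cond_t wait;    /* stands in for the wait queue */
};

static void tcs_init(struct two_counter_sem *s, int value)
{
        s->count = value;
        s->waking = 0;
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->wait, NULL);
}

static void tcs_down(struct two_counter_sem *s)
{
        pthread_mutex_lock(&s->lock);
        if (--s->count < 0) {
                /* contended: sleep until up() grants a credit; every
                 * sleeper wakes, but only the first to see waking > 0
                 * gates through, the rest go back to sleep */
                while (s->waking == 0)
                        pthread_cond_wait(&s->wait, &s->lock);
                s->waking--;
        }
        pthread_mutex_unlock(&s->lock);
}

static void tcs_up(struct two_counter_sem *s)
{
        pthread_mutex_lock(&s->lock);
        /* extra work is needed only if count was negative before the
         * increment, i.e. someone took the slow path in tcs_down() */
        if (s->count++ < 0) {
                s->waking++;
                pthread_cond_broadcast(&s->wait);  /* wake ALL waiters */
        }
        pthread_mutex_unlock(&s->lock);
}

The real code has no convenient mutex to serialize both counters; it must make do with atomic increments and an interrupt-safe wait queue, which is exactly why the comment stresses that waking_non_zero() must execute atomically.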



