Linux Core Kernel Commentary




Kernel/sched.c - Part 3


26221 static inline void reschedule_idle(
26222     struct task_struct * p)
26223 {
26224 
26225   if (p->policy != SCHED_OTHER ||
26226       p->counter > current->counter + 3) {
26227     current->need_resched = 1;
26228     return;
26229   }
26230 
26231 #ifdef __SMP__
26232   /* ("wakeup()" should not be called before we've
26233    * initialized SMP completely. Basically a not-yet
26234    * initialized SMP subsystem can be considered as a
26235    * not-yet working scheduler, simply dont use it before
26236    * it's up and running ...)
26237    *
26238    * SMP rescheduling is done in 2 passes:
26239    *  - pass #1: faster: quick decisions
26240    *  - pass #2: slower: let's try to find another CPU */
26241 
26242   /* Pass #1
26243    *
26244    * There are two metrics here:
26245    *
26246    * first, a 'cutoff' interval, currently 0-200 usecs on
26247    * x86 CPUs, depending on the size of the 'SMP-local
26248    * cache'. If the current process has longer average
26249    * timeslices than this, then we utilize the idle CPU.
26250    *
26251    * second, if the wakeup comes from a process context,
26252    * then the two processes are 'related'. (they form a
26253    * 'gang')
26254    *
26255    * An idle CPU is almost always a bad thing, thus we
26256    * skip the idle-CPU utilization only if both these
26257    * conditions are true. (ie. a 'process-gang'
26258    * rescheduling with rather high frequency should stay
26259    * on the same CPU).
26260    *
26261    * [We can switch to something more finegrained in
26262    * 2.3.] */
26263   if ((current->avg_slice < cacheflush_time) &&
26264       related(current, p))
26265     return;
26266 
26267   reschedule_idle_slow(p);
26268 #endif /* __SMP__ */
26269 }
26270 
26271 /* Careful!
26272  *
26273  * This has to add the process to the _beginning_ of the
26274  * run-queue, not the end. See the comment about "This is
26275  * subtle" in the scheduler proper.. */
26276 static inline void add_to_runqueue(struct task_struct *p)
26277 {
26278   struct task_struct *next = init_task.next_run;
26279 
26280   p->prev_run = &init_task;
26281   init_task.next_run = p;
26282   p->next_run = next;
26283   next->prev_run = p;
26284   nr_running++;
26285 }
26286 
26287 static inline void del_from_runqueue(
26288     struct task_struct * p)
26289 {
26290   struct task_struct *next = p->next_run;
26291   struct task_struct *prev = p->prev_run;
26292 
26293   nr_running--;
26294   next->prev_run = prev;
26295   prev->next_run = next;
26296   p->next_run = NULL;
26297   p->prev_run = NULL;
26298 }
26299 
26300 static inline void move_last_runqueue(
26301     struct task_struct * p)
26302 {
26303   struct task_struct *next = p->next_run;
26304   struct task_struct *prev = p->prev_run;
26305 
26306   /* remove from list */
26307   next->prev_run = prev;
26308   prev->next_run = next;
26309   /* add back to list */
26310   p->next_run = &init_task;
26311   prev = init_task.prev_run;
26312   init_task.prev_run = p;
26313   p->prev_run = prev;
26314   prev->next_run = p;
26315 }
26316 
26317 static inline void
26318 move_first_runqueue(struct task_struct * p)
26319 {
26320   struct task_struct *next = p->next_run;
26321   struct task_struct *prev = p->prev_run;
26322 
26323   /* remove from list */
26324   next->prev_run = prev;
26325   prev->next_run = next;
26326   /* add back to list */
26327   p->prev_run = &init_task;
26328   next = init_task.next_run;
26329   init_task.next_run = p;
26330   p->next_run = next;
26331   next->prev_run = p;
26332 }
26333 
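The four helpers above treat the run queue as a circular, doubly linked list whose anchor is init_task itself: add_to_runqueue() splices a task in right after the anchor (the head), move_last_runqueue() and move_first_runqueue() splice it back in just before or just after the anchor, and del_from_runqueue() unlinks it and clears its pointers, which is why wake_up_process() below can use "if (!p->next_run)" as an "is it already queued?" test. The stand-alone sketch below mirrors that pointer discipline in user space; the type demo_task, the helper names, and the anchor object are invented for illustration and are not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in for struct task_struct: only the run-queue links. */
struct demo_task {
        const char *name;
        struct demo_task *next_run;
        struct demo_task *prev_run;
};

/* The anchor plays the role of init_task: it is always on the ring. */
static struct demo_task anchor = { "anchor", &anchor, &anchor };
static int nr_running;

/* Mirrors add_to_runqueue(): link p in right after the anchor. */
static void demo_add(struct demo_task *p)
{
        struct demo_task *next = anchor.next_run;

        p->prev_run = &anchor;
        anchor.next_run = p;
        p->next_run = next;
        next->prev_run = p;
        nr_running++;
}

/* Mirrors del_from_runqueue(): unlink p and clear its pointers. */
static void demo_del(struct demo_task *p)
{
        struct demo_task *next = p->next_run;
        struct demo_task *prev = p->prev_run;

        nr_running--;
        next->prev_run = prev;
        prev->next_run = next;
        p->next_run = NULL;
        p->prev_run = NULL;
}

/* Mirrors move_last_runqueue(): unlink p, then splice it back in
 * just before the anchor, i.e. at the tail of the ring. */
static void demo_move_last(struct demo_task *p)
{
        struct demo_task *next = p->next_run;
        struct demo_task *prev = p->prev_run;

        next->prev_run = prev;
        prev->next_run = next;
        p->next_run = &anchor;
        prev = anchor.prev_run;
        anchor.prev_run = p;
        p->prev_run = prev;
        prev->next_run = p;
}

int main(void)
{
        struct demo_task a = { "a" }, b = { "b" };

        demo_add(&a);        /* ring: anchor <-> a       */
        demo_add(&b);        /* ring: anchor <-> b <-> a */
        demo_move_last(&b);  /* ring: anchor <-> a <-> b */

        for (struct demo_task *t = anchor.next_run; t != &anchor;
             t = t->next_run)
                printf("%s\n", t->name);     /* prints: a, then b */
        printf("nr_running = %d\n", nr_running);

        demo_del(&a);
        printf("a queued? %s\n", a.next_run ? "yes" : "no");  /* no */
        return 0;
}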
26334 /* The tasklist_lock protects the linked list of
26335  * processes.
26336  *
26337  * The scheduler lock is protecting against multiple
26338  * entry into the scheduling code, and doesn't need to
26339  * worry about interrupts (because interrupts cannot call
26340  * the scheduler).
26341  *
26342  * The run-queue lock locks the parts that actually
26343  * access and change the run-queues, and have to be
26344  * interrupt-safe. */
26345 /* should be acquired first */
26346 spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED;
26347 spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED; /* 2nd */
26348 rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;     /* 3rd */
26349 
26350 /* Wake up a process. Put it on the run-queue if it's not
26351  * already there. The "current" process is always on the
26352  * run-queue (except when the actual re-schedule is in
26353  * progress), and as such you're allowed to do the
26354  * simpler "current->state = TASK_RUNNING" to mark
26355  * yourself runnable without the overhead of this. */
26356 void wake_up_process(struct task_struct * p)
26357 {
26358   unsigned long flags;
26359 
26360   spin_lock_irqsave(&runqueue_lock, flags);
26361   p->state = TASK_RUNNING;
26362   if (!p->next_run) {
26363     add_to_runqueue(p);
26364     reschedule_idle(p);
26365   }
26366   spin_unlock_irqrestore(&runqueue_lock, flags);
26367 }
26368 
26369 static void process_timeout(unsigned long __data)
26370 {
26371   struct task_struct * p = (struct task_struct *) __data;
26372 
26373   wake_up_process(p);
26374 }
26375 
26376 /* This is the function that decides how desirable a
26377  * process is.. You can weigh different processes
26378  * against each other depending on what CPU they've run
26379  * on lately etc to try to handle cache and TLB miss
26380  * penalties.
26381  *
26382  * Return values:
26383  *   -1000: never select this
26384  *       0: out of time, recalculate counters
26385  *          (but it might still be selected)
26386  *     +ve: "goodness" value (the larger, the better)
26387  *   +1000: realtime process, select this. */
26388 static inline int goodness(struct task_struct * p,
26389     struct task_struct * prev, int this_cpu)
26390 {
26391   int policy = p->policy;
26392   int weight;
26393 
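The listing breaks off here, in the opening lines of goodness(); its body continues in the next part. The comment at lines 26382 through 26387, however, already fixes the function's contract, and a caller can be written against that contract alone. The fragment below is a simplified user-space illustration of how such return values would be used to pick the most desirable candidate; the candidate names and numbers are made up, and this is not the kernel's own schedule() loop.

#include <stdio.h>

/* Invented candidate record: a name plus a precomputed "goodness"
 * following the contract documented above:
 *   -1000  never select this
 *       0  out of time (counters need recalculating, yet selectable)
 *     +ve  ordinary weight, the larger the better
 *   +1000  realtime, select this                                    */
struct candidate {
        const char *name;
        int goodness;
};

int main(void)
{
        /* Made-up values, purely to exercise the contract. */
        struct candidate cand[] = {
                { "idle thread",   -1000 },
                { "expired shell",     0 },
                { "compiler",         11 },
                { "text editor",       4 },
        };
        int n = sizeof(cand) / sizeof(cand[0]);

        /* Start the running maximum at -1000 and compare with ">":
         * a candidate returning -1000 ("never select") can then never
         * win, while a candidate returning 0 still can. */
        int best = -1000;
        const struct candidate *next = NULL;

        for (int i = 0; i < n; i++) {
                if (cand[i].goodness > best) {
                        best = cand[i].goodness;
                        next = &cand[i];
                }
        }

        if (next == NULL)
                printf("nothing selectable\n");
        else if (best == 0)
                printf("%s wins, but every counter is spent: "
                       "recalculate counters\n", next->name);
        else
                printf("selected: %s (goodness %d)\n", next->name, best);
        return 0;
}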




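One more mechanism in this part of the listing deserves a small illustration: the declarations at lines 26345 through 26348 document a fixed acquisition order for the three locks (scheduler_lock first, then runqueue_lock, then tasklist_lock). The user-space pthread sketch below shows the discipline itself rather than the kernel's locks: as long as every path that needs several of these locks takes them in the same global order and releases them in reverse, no two paths can each hold one lock while waiting for the other, so the classic AB/BA deadlock cannot occur. All names here are invented for the example.

#include <pthread.h>
#include <stdio.h>

/* Invented user-space stand-ins for the three kernel locks, listed in
 * the acquisition order documented in the listing above. */
static pthread_mutex_t demo_sched_lock    = PTHREAD_MUTEX_INITIALIZER; /* 1st */
static pthread_mutex_t demo_runqueue_lock = PTHREAD_MUTEX_INITIALIZER; /* 2nd */
static pthread_mutex_t demo_tasklist_lock = PTHREAD_MUTEX_INITIALIZER; /* 3rd */

/* Every path that needs more than one lock takes them in the same
 * global order (1st, 2nd, 3rd) and releases them in reverse. */
static void *worker(void *arg)
{
        const char *who = arg;

        pthread_mutex_lock(&demo_sched_lock);
        pthread_mutex_lock(&demo_runqueue_lock);
        pthread_mutex_lock(&demo_tasklist_lock);

        printf("%s: holding all three locks in the documented order\n", who);

        pthread_mutex_unlock(&demo_tasklist_lock);
        pthread_mutex_unlock(&demo_runqueue_lock);
        pthread_mutex_unlock(&demo_sched_lock);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, "thread 1");
        pthread_create(&t2, NULL, worker, "thread 2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Built with cc -pthread, both threads run the same ordered sequence; reversing the order in just one of them is exactly the pattern the comment at lines 26345 through 26348 is meant to rule out.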