Linux Core Kernel Commentary




Kernel/sched.c - part 4


26394   if (policy & SCHED_YIELD) {
26395     p->policy = policy & ~SCHED_YIELD;
26396     return 0;
26397   }
26398
26399   /* Realtime process, select the first one on the
26400    * runqueue (taking priorities within processes into
26401    * account). */
26402   if (policy != SCHED_OTHER)
26403     return 1000 + p->rt_priority;
26404
26405   /* Give the process a first-approximation goodness
26406    * value according to the number of clock-ticks it has
26407    * left.
26408    *
26409    * Don't do any other calculations if the time slice is
26410    * over.. */
26411   weight = p->counter;
26412   if (weight) {
26413
26414 #ifdef __SMP__
26415     /* Give a largish advantage to the same processor...
26416      * (this is equivalent to penalizing other
26417      * processors) */
26418     if (p->processor == this_cpu)
26419       weight += PROC_CHANGE_PENALTY;
26420 #endif
26421
26422     /* .. and a slight advantage to the current thread */
26423     if (p->mm == prev->mm)
26424       weight += 1;
26425     weight += p->priority;
26426   }
26427
26428   return weight;
26429 }
26430
26431 /* Event timer code */
26432 #define TVN_BITS 6
26433 #define TVR_BITS 8
26434 #define TVN_SIZE (1 << TVN_BITS)
26435 #define TVR_SIZE (1 << TVR_BITS)
26436 #define TVN_MASK (TVN_SIZE - 1)
26437 #define TVR_MASK (TVR_SIZE - 1)
26438
26439 struct timer_vec {
26440   int index;
26441   struct timer_list *vec[TVN_SIZE];
26442 };
26443
26444 struct timer_vec_root {
26445   int index;
26446   struct timer_list *vec[TVR_SIZE];
26447 };
26448
26449 static struct timer_vec tv5 = { 0 };
26450 static struct timer_vec tv4 = { 0 };
26451 static struct timer_vec tv3 = { 0 };
26452 static struct timer_vec tv2 = { 0 };
26453 static struct timer_vec_root tv1 = { 0 };
26454
26455 static struct timer_vec * const tvecs[] = {
26456   (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
26457 };
26458
26459 #define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
26460
26461 static unsigned long timer_jiffies = 0;
26462
26463 static inline void insert_timer(struct timer_list *timer,
26464   struct timer_list **vec, int idx)
26465 {
26466   if ((timer->next = vec[idx]))
26467     vec[idx]->prev = timer;
26468   vec[idx] = timer;
26469   timer->prev = (struct timer_list *)&vec[idx];
26470 }
26471
26472 static inline void internal_add_timer(
26473   struct timer_list *timer)
26474 {
26475   /* must be cli-ed when calling this */
26476   unsigned long expires = timer->expires;
26477   unsigned long idx = expires - timer_jiffies;
26478
26479   if (idx < TVR_SIZE) {
26480     int i = expires & TVR_MASK;
26481     insert_timer(timer, tv1.vec, i);
26482   } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
26483     int i = (expires >> TVR_BITS) & TVN_MASK;
26484     insert_timer(timer, tv2.vec, i);
26485   } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
26486     int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
26487     insert_timer(timer, tv3.vec, i);
26488   } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
26489     int i =
26490       (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
26491     insert_timer(timer, tv4.vec, i);
26492   } else if ((signed long) idx < 0) {
26493     /* can happen if you add a timer with expires ==
26494      * jiffies, or you set a timer to go off in the past
26495      */
26496     insert_timer(timer, tv1.vec, tv1.index);
26497   } else if (idx <= 0xffffffffUL) {
26498     int i =
26499       (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
26500     insert_timer(timer, tv5.vec, i);
26501   } else {
26502     /* Can only get here on architectures with 64-bit
26503      * jiffies */
26504     timer->next = timer->prev = timer;
26505   }
26506 }
26507
26508 spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
26509
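The cascade of range checks in internal_add_timer() implements a hierarchical timer wheel: tv1 resolves the next 256 ticks exactly, while tv2 through tv5 cover progressively coarser ranges with 64 slots each. The following user-space sketch is only an illustration, not kernel code: tv_level(), main(), and the sample values are invented here, and the "expiry in the past" and 64-bit-jiffies branches are omitted for brevity. It reproduces the same slot selection so you can see which vector a given expiry would land in.

#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_MASK ((1UL << TVN_BITS) - 1)
#define TVR_MASK ((1UL << TVR_BITS) - 1)

/* Return the timer-vector level (1..5) and slot for a given expiry,
 * following the same range checks as internal_add_timer() above. */
static int tv_level(unsigned long expires, unsigned long timer_jiffies,
                    unsigned long *slot)
{
    unsigned long idx = expires - timer_jiffies;

    if (idx < (1UL << TVR_BITS)) {                  /* next 256 ticks */
        *slot = expires & TVR_MASK;
        return 1;
    } else if (idx < (1UL << (TVR_BITS + TVN_BITS))) {
        *slot = (expires >> TVR_BITS) & TVN_MASK;
        return 2;
    } else if (idx < (1UL << (TVR_BITS + 2 * TVN_BITS))) {
        *slot = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
        return 3;
    } else if (idx < (1UL << (TVR_BITS + 3 * TVN_BITS))) {
        *slot = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
        return 4;
    }
    *slot = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
    return 5;
}

int main(void)
{
    unsigned long now = 1000, slot;
    unsigned long samples[] = { 1001, 1200, 5000, 70000, 20000000 };
    int i;

    for (i = 0; i < 5; i++) {
        int level = tv_level(samples[i], now, &slot);
        printf("expires=%lu -> tv%d, slot %lu\n",
               samples[i], level, slot);
    }
    return 0;
}

A timer that is further away simply lands in a coarser vector; each time the coarse slot for the current tick comes up, its timers are re-inserted (cascaded) into finer vectors until they reach tv1 and finally expire.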
26510 void add_timer(struct timer_list *timer)
26511 {
26512   unsigned long flags;
26513
26514   spin_lock_irqsave(&timerlist_lock, flags);
26515   if (timer->prev)
26516     goto bug;
26517   internal_add_timer(timer);
26518 out:
26519   spin_unlock_irqrestore(&timerlist_lock, flags);
26520   return;
26521
26522 bug:
26523   printk("bug: kernel timer added twice at %p.\n",
26524          __builtin_return_address(0));
26525   goto out;
26526 }
26527
26528 static inline int detach_timer(struct timer_list *timer)
26529 {
26530   struct timer_list *prev = timer->prev;
26531   if (prev) {
26532     struct timer_list *next = timer->next;
26533     prev->next = next;
26534     if (next)
26535       next->prev = prev;
26536     return 1;
26537   }
26538   return 0;
26539 }
26540
26541 void mod_timer(struct timer_list *timer,
26542                unsigned long expires)
26543 {
26544   unsigned long flags;
26545
26546   spin_lock_irqsave(&timerlist_lock, flags);
26547   timer->expires = expires;
26548   detach_timer(timer);
26549   internal_add_timer(timer);
26550   spin_unlock_irqrestore(&timerlist_lock, flags);
26551 }
26552
26553 int del_timer(struct timer_list * timer)
26554 {
26555   int ret;
26556   unsigned long flags;
26557
26558   spin_lock_irqsave(&timerlist_lock, flags);
26559   ret = detach_timer(timer);
26560   timer->next = timer->prev = 0;
26561   spin_unlock_irqrestore(&timerlist_lock, flags);
26562   return ret;
26563 }
26564
26565 #ifdef __SMP__
26566
26567 #define idle_task (task[cpu_number_map[this_cpu]])
26568 #define can_schedule(p) (!(p)->has_cpu)
26569
26570 #else
26571
26572 #define idle_task (&init_task)
26573 #define can_schedule(p) (1)
26574
26575 #endif
26576
26577 signed long schedule_timeout(signed long timeout)
26578 {
26579   struct timer_list timer;
26580   unsigned long expire;
26581
26582   switch (timeout)
26583   {
26584   case MAX_SCHEDULE_TIMEOUT:
26585     /* These two special cases are useful to be
26586      * comfortable in the caller. Nothing more. We could
26587      * take MAX_SCHEDULE_TIMEOUT from one of the negative
26588      * value but I' d like to return a valid offset (>=0)
26589      * to allow the caller to do everything it want with
26590      * the retval. */
26591     schedule();
26592     goto out;
26593   default:
26594     /* Another bit of PARANOID. Note that the retval will
26595      * be 0 since no piece of kernel is supposed to do a
26596      * check for a negative retval of schedule_timeout()
26597      * (since it should never happens anyway). You just
26598      * have the printk() that will tell you if something
26599      * is gone wrong and where. */
26600     if (timeout < 0)
26601     {
26602       printk(KERN_ERR "schedule_timeout: wrong timeout "
26603              "value %lx from %p\n", timeout,
26604              __builtin_return_address(0));
26605       goto out;
26606     }
26607   }
26608
26609   expire = timeout + jiffies;
26610
26611   init_timer(&timer);
26612   timer.expires = expire;
26613   timer.data = (unsigned long) current;
26614   timer.function = process_timeout;
26615
26616   add_timer(&timer);
26617   schedule();
26618   del_timer(&timer);
26619
26620   timeout = expire - jiffies;
26621
26622 out:
26623   return timeout < 0 ? 0 : timeout;
26624 }
26625
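Taken together, add_timer(), mod_timer(), del_timer(), and schedule_timeout() are the external interface of this timer code. The fragment below is a sketch of the typical calling pattern in a 2.2-era driver; the names my_timer, my_timeout_handler, my_start_timer, my_stop_timer, my_sleep_a_while, the stored value 42, and the chosen intervals are all invented for the illustration and are not taken from the kernel source.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>

static struct timer_list my_timer;       /* hypothetical driver timer */

static void my_timeout_handler(unsigned long data)
{
    /* Called from the timer bottom half when the timer expires;
     * 'data' is whatever was stored in my_timer.data below. */
    printk("timer fired, data=%lu\n", data);
}

static void my_start_timer(void)
{
    init_timer(&my_timer);               /* clears the list pointers  */
    my_timer.expires = jiffies + 2 * HZ; /* roughly two seconds ahead */
    my_timer.data = 42;
    my_timer.function = my_timeout_handler;
    add_timer(&my_timer);                /* hooks it into tv1..tv5    */
}

static void my_stop_timer(void)
{
    del_timer(&my_timer);                /* removes it if still pending */
}

static void my_sleep_a_while(void)
{
    /* Sleeping in process context: set the task state first, then let
     * schedule_timeout() arm a temporary timer and call schedule(). */
    current->state = TASK_INTERRUPTIBLE;
    schedule_timeout(HZ / 2);            /* about half a second */
}

Note that schedule_timeout() itself does not change the task state; the caller sets it before the call, which is why the pattern above assigns TASK_INTERRUPTIBLE first.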
26626 /* This one aligns per-CPU data on cacheline boundaries.
26627  */
26628 static union {
26629   struct schedule_data {
26630     struct task_struct * prev;
26631     long prevstate;
26632     cycles_t last_schedule;
26633   } schedule_data;
26634   char __pad [SMP_CACHE_BYTES];
26635 } aligned_data [NR_CPUS] __cacheline_aligned =
26636   { {{&init_task,0}}};
26637
26638 static inline void __schedule_tail (void)
26639 {
26640 #ifdef __SMP__
26641   struct schedule_data * sched_data;
26642
26643   /* We might have switched CPUs: */
26644   sched_data =
26645     &aligned_data[smp_processor_id()].schedule_data;
26646
26647   /* Subtle. In the rare event that we got a wakeup to
26648    * 'prev' just during the reschedule (this is possible,
26649    * the scheduler is pretty parallel), we should do
26650    * another reschedule in the next task's
26651    * context. schedule() will do the right thing next
26652    * time around. This is equivalent to 'delaying' the
26653    * wakeup until the reschedule has finished. */
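The aligned_data array above keeps each CPU's schedule_data in its own cache line: the union pads every element out to SMP_CACHE_BYTES, so scheduler bookkeeping on one processor never shares a line with another processor's entry (no false sharing). A generic user-space sketch of the same trick follows; CACHE_LINE, NR_CPUS_DEMO, and per_cpu_counter are invented for the illustration.

#include <stdio.h>

#define CACHE_LINE 64                    /* assumed line size for the demo */
#define NR_CPUS_DEMO 4

/* Same idea as aligned_data[]: wrap the payload in a union whose second
 * member pads every element to a full cache line, so element i and
 * element i+1 can never share a line. */
static union {
    struct {
        long counter;                    /* the actual per-CPU payload */
    } data;
    char __pad[CACHE_LINE];
} per_cpu_counter[NR_CPUS_DEMO];

int main(void)
{
    printf("element size: %zu bytes\n", sizeof(per_cpu_counter[0]));
    printf("stride between entry 0 and entry 1: %ld bytes\n",
           (long)((char *)&per_cpu_counter[1] -
                  (char *)&per_cpu_counter[0]));
    return 0;
}

The kernel version additionally tags the array __cacheline_aligned so that the first element itself starts on a line boundary; the sketch only guarantees the stride between elements.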



