00001
00025 #ifndef _XENO_NUCLEUS_SCHED_H
00026 #define _XENO_NUCLEUS_SCHED_H
00027
00031 #include <nucleus/thread.h>
00032
00033 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00034
00035 #include <nucleus/schedqueue.h>
00036 #include <nucleus/sched-tp.h>
00037 #include <nucleus/sched-sporadic.h>
00038
00039
/*
 * Status bits for the xnsched.status word.  XNRESCHED is raised by
 * the xnsched_set_*_resched() macros below to flag a pending
 * rescheduling; the other bits are set and tested elsewhere in the
 * nucleus (NOTE(review): per-bit meanings follow the upstream naming
 * -- confirm against nucleus/pod.c and the timer code).
 */
#define XNKCOUT		0x80000000	/* Sched callout context */
#define XNHTICK		0x40000000	/* Host tick pending */
#define XNRPICK		0x20000000	/* Check RPI state */
#define XNINTCK		0x10000000	/* In master tick handler context */
#define XNINIRQ		0x08000000	/* In IRQ handling context */
#define XNSWLOCK	0x04000000	/* In context switch */
#define XNRESCHED	0x02000000	/* Needs rescheduling */
#define XNHDEFER	0x01000000	/* Host tick deferred */
00048
/* Per-scheduler state of the real-time scheduling class. */
struct xnsched_rt {
	xnsched_queue_t runnable;	/* Queue of runnable threads. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
	xnsched_queue_t relaxed;	/* Queue of relaxed threads (priority
					   coupling / RPI support). */
#endif
};
00055
/*
 * Per-CPU scheduler slot.  One instance exists for each CPU the
 * nucleus manages; it aggregates the per-class run queues, the local
 * timer queue and the bookkeeping needed to switch threads.
 */
typedef struct xnsched {

	xnflags_t status;		/* Scheduler status bits (XNRESCHED, ...). */
	int cpu;			/* CPU number served by this slot. */
	struct xnthread *curr;		/* Thread currently running on this CPU. */
	xnarch_cpumask_t resched;	/* Mask of CPUs with a pending
					   rescheduling request (see
					   xnsched_set_resched()). */
	struct xnsched_rt rt;		/* RT class per-scheduler data. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tp tp;		/* Temporal-partitioning class data. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic pss;	/* Sporadic class data. */
#endif

	xntimerq_t timerqueue;		/* Local timer queue. */
	volatile unsigned inesting;	/* Interrupt nesting level. */
	struct xntimer htimer;		/* Host timer (NOTE(review): presumably
					   relays the Linux tick -- confirm). */
	struct xnthread *zombie;	/* Thread awaiting finalization, if any
					   (see xnsched_finalize_zombie()). */
	struct xnthread rootcb;		/* Control block of the root thread. */
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	struct xnthread *last;		/* Previously running thread, tracked
					   across an unlocked switch. */
#endif

#ifdef CONFIG_XENO_HW_FPU
	struct xnthread *fpuholder;	/* Thread whose context currently owns
					   the FPU on this CPU. */
#endif

#ifdef CONFIG_XENO_OPT_WATCHDOG
	struct xntimer wdtimer;		/* Watchdog timer. */
	int wdcount;			/* Watchdog tick count; cleared by
					   xnsched_reset_watchdog(). */
#endif

#ifdef CONFIG_XENO_OPT_STATS
	xnticks_t last_account_switch;	/* Time of last execution-time
					   accounting switch. */
	xnstat_exectime_t *current_account; /* Account being charged. */
#endif

#ifdef CONFIG_XENO_OPT_PRIOCPL
	DECLARE_XNLOCK(rpilock);	/* Lock protecting RPI state. */
#endif

#ifdef CONFIG_XENO_OPT_PERVASIVE
	/* Gatekeeper support (NOTE(review): presumably used to move
	   Linux tasks into primary mode -- confirm in shadow code). */
	struct task_struct *gatekeeper;	/* Gatekeeper kernel task. */
	wait_queue_head_t gkwaitq;	/* Wait queue the gatekeeper sleeps on. */
	struct linux_semaphore gksync;	/* Synchronization with requesters. */
	struct xnthread *gktarget;	/* Thread the gatekeeper must process. */
#endif

} xnsched_t;
00111
00112 union xnsched_policy_param;
00113
/*
 * Scheduling class descriptor.  Each policy (idle, rt, tp, sporadic,
 * ...) provides one of these; the nucleus dispatches through the
 * handlers below.  Some handlers are optional and tested for NULL
 * before being called (e.g. sched_forget, sched_suspend_rpi).
 */
struct xnsched_class {

	/* Initialize the per-scheduler data of this class. */
	void (*sched_init)(struct xnsched *sched);
	/* Add, remove or requeue a thread on the class run queue. */
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	/* Return the next runnable thread of this class, if any. */
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	/* Charge a clock tick to the current thread (round-robin). */
	void (*sched_tick)(struct xnthread *curr);
	/* Rotate a run queue; interpretation of *p is class-defined. */
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	/* Move a thread to another scheduler slot (CPU). */
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	/* Set / get the scheduling parameters of a thread. */
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	/* Track a priority change (e.g. inheritance) without altering
	   the base parameters. */
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	/* Accept or reject a thread joining this class; may fail. */
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	/* Drop a thread leaving this class (optional). */
	void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	/* Priority coupling (RPI) hooks; suspend/resume are optional. */
	struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
					   struct xnthread *thread);
	void (*sched_pop_rpi)(struct xnthread *thread);
	struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
	void (*sched_suspend_rpi)(struct xnthread *thread);
	void (*sched_resume_rpi)(struct xnthread *thread);
#endif
#ifdef CONFIG_PROC_FS
	/* /proc interface of the class. */
	void (*sched_init_proc)(struct proc_dir_entry *root);
	void (*sched_cleanup_proc)(struct proc_dir_entry *root);
	struct proc_dir_entry *proc;	/* Class /proc directory entry. */
#endif
	int nthreads;			/* Number of member threads
					   (decremented by xnsched_forget()). */
	struct xnsched_class *next;	/* Next class (NOTE(review): presumably
					   the registration list -- confirm). */
	int weight;			/* Class weight, added to thread
					   priorities (see
					   xnsched_weighted_*prio()). */
	const char *name;		/* Symbolic class name. */
};
00153
/*
 * Each scheduling class owns a band of XNSCHED_CLASS_MAX_THREADS
 * priority slots; XNSCHED_CLASS_WEIGHT() yields the base weight of
 * the nth band (see xnsched_weighted_bprio()/xnsched_weighted_cprio()).
 */
#define XNSCHED_CLASS_MAX_THREADS	32768
/* Argument is parenthesized so e.g. XNSCHED_CLASS_WEIGHT(a + b)
   expands as intended. */
#define XNSCHED_CLASS_WEIGHT(n)		((n) * XNSCHED_CLASS_MAX_THREADS)
00156
00157
/* Priority flag (NOTE(review): presumably tags a priority value as
   the thread's current/running priority -- confirm against users of
   XNSCHED_RUNPRIO). */
#define XNSCHED_RUNPRIO   0x80000000

/* CPU number served by a scheduler slot; constant 0 on UP builds. */
#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)	((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)	({ (void)__sched__; 0; })
#endif /* CONFIG_SMP */
00165
00166
00167 static inline int xnsched_resched_p(struct xnsched *sched)
00168 {
00169 return !xnarch_cpus_empty(sched->resched);
00170 }
00171
00172 static inline int xnsched_self_resched_p(struct xnsched *sched)
00173 {
00174 return xnarch_cpu_isset(xnsched_cpu(sched), sched->resched);
00175 }
00176
00177
/*
 * Request a rescheduling for @__sched__'s CPU from that same CPU:
 * mark the CPU in the local resched mask and raise XNRESCHED in the
 * local status word.
 */
#define xnsched_set_self_resched(__sched__) do {		\
  xnarch_cpu_set(xnsched_cpu(__sched__), (__sched__)->resched); \
  setbits((__sched__)->status, XNRESCHED);			\
} while (0)

/*
 * Request a rescheduling for @__sched__'s CPU, possibly from a remote
 * CPU: the target CPU is recorded in the *current* scheduler's
 * resched mask and XNRESCHED is raised locally (NOTE(review): the
 * request is presumably propagated to the target at the next local
 * rescheduling point -- confirm in xnpod_schedule()).
 */
#define xnsched_set_resched(__sched__) do {				\
  xnsched_t *current_sched = xnpod_current_sched();			\
  xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched);	\
  setbits(current_sched->status, XNRESCHED);				\
} while (0)
00189
00190 void xnsched_zombie_hooks(struct xnthread *thread);
00191
00192 void __xnsched_finalize_zombie(struct xnsched *sched);
00193
00194 static inline void xnsched_finalize_zombie(struct xnsched *sched)
00195 {
00196 if (sched->zombie)
00197 __xnsched_finalize_zombie(sched);
00198 }
00199
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

/* Re-sync the scheduler state after a context switch performed with
   the nucleus lock released (NOTE(review): semantics inferred from
   the config option name -- confirm in nucleus/pod.c). */
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

/* A rescheduling may have been requested while the switch was in
   flight; run the scheduler to honor it. */
#define xnsched_resched_after_unlocked_switch() xnpod_schedule()
00205
00206 static inline
00207 int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
00208 {
00209 return testbits(sched->status, XNRESCHED);
00210 }
00211
00212 #else
00213
#ifdef CONFIG_SMP
/* No unlocked switch support: switches run with hard irqs off.  On
   SMP, re-read the current slot in case the thread moved. */
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   xnpod_current_sched(); })
#else /* !CONFIG_SMP */
/* UP: the scheduler slot cannot have changed. */
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   (__sched__); })
#endif /* !CONFIG_SMP */

/* No unlocked switches can occur: nothing to redo afterwards. */
#define xnsched_resched_after_unlocked_switch()	do { } while(0)

/* ... and no rescheduling can have been missed. */
#define xnsched_maybe_resched_after_unlocked_switch(sched)	\
	({ (void)(sched); 0; })
00229 #endif
00230
#ifdef CONFIG_XENO_OPT_WATCHDOG
/* Rearm the scheduler watchdog by clearing its tick count. */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
/* Watchdog disabled: no-op stub keeps callers unconditional. */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */
00241
00242 #include <nucleus/sched-idle.h>
00243 #include <nucleus/sched-rt.h>
00244
00245 void xnsched_init_proc(void);
00246
00247 void xnsched_cleanup_proc(void);
00248
00249 void xnsched_register_classes(void);
00250
00251 void xnsched_init(struct xnsched *sched, int cpu);
00252
00253 void xnsched_destroy(struct xnsched *sched);
00254
00255 struct xnthread *xnsched_pick_next(struct xnsched *sched);
00256
00257 void xnsched_putback(struct xnthread *thread);
00258
00259 int xnsched_set_policy(struct xnthread *thread,
00260 struct xnsched_class *sched_class,
00261 const union xnsched_policy_param *p);
00262
00263 void xnsched_track_policy(struct xnthread *thread,
00264 struct xnthread *target);
00265
00266 void xnsched_migrate(struct xnthread *thread,
00267 struct xnsched *sched);
00268
00269 void xnsched_migrate_passive(struct xnthread *thread,
00270 struct xnsched *sched);
00271
00303 static inline void xnsched_rotate(struct xnsched *sched,
00304 struct xnsched_class *sched_class,
00305 const union xnsched_policy_param *sched_param)
00306 {
00307 sched_class->sched_rotate(sched, sched_param);
00308 }
00309
/*
 * Initialize the per-class state embedded in a thread control block.
 * The idle and rt classes cannot fail; the tp and sporadic classes
 * (when configured in) may return an error, which aborts the
 * initialization.
 */
static inline int xnsched_init_tcb(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_tcb(thread);
	xnsched_rt_init_tcb(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_tcb(thread);
#endif /* CONFIG_XENO_OPT_SCHED_TP */
	if (ret)
		return ret;

#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_tcb(thread);
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
	return ret;
}
00328
00329 static inline int xnsched_root_priority(struct xnsched *sched)
00330 {
00331 return sched->rootcb.cprio;
00332 }
00333
00334 static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
00335 {
00336 return sched->rootcb.sched_class;
00337 }
00338
00339 static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
00340 {
00341 struct xnsched_class *sched_class = curr->sched_class;
00342
00343
00344
00345
00346
00347 if (xnthread_time_base(curr) == tbase &&
00348 sched_class != &xnsched_class_idle &&
00349 sched_class == curr->base_class &&
00350 xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
00351 sched_class->sched_tick(curr);
00352 }
00353
00354 #ifdef CONFIG_XENO_OPT_SCHED_CLASSES
00355
00356 static inline void xnsched_enqueue(struct xnthread *thread)
00357 {
00358 struct xnsched_class *sched_class = thread->sched_class;
00359
00360 if (sched_class != &xnsched_class_idle)
00361 sched_class->sched_enqueue(thread);
00362 }
00363
00364 static inline void xnsched_dequeue(struct xnthread *thread)
00365 {
00366 struct xnsched_class *sched_class = thread->sched_class;
00367
00368 if (sched_class != &xnsched_class_idle)
00369 sched_class->sched_dequeue(thread);
00370 }
00371
00372 static inline void xnsched_requeue(struct xnthread *thread)
00373 {
00374 struct xnsched_class *sched_class = thread->sched_class;
00375
00376 if (sched_class != &xnsched_class_idle)
00377 sched_class->sched_requeue(thread);
00378 }
00379
00380 static inline int xnsched_weighted_bprio(struct xnthread *thread)
00381 {
00382 return thread->bprio + thread->sched_class->weight;
00383 }
00384
00385 static inline int xnsched_weighted_cprio(struct xnthread *thread)
00386 {
00387 return thread->cprio + thread->sched_class->weight;
00388 }
00389
00390 static inline void xnsched_setparam(struct xnthread *thread,
00391 const union xnsched_policy_param *p)
00392 {
00393 thread->sched_class->sched_setparam(thread, p);
00394 }
00395
00396 static inline void xnsched_getparam(struct xnthread *thread,
00397 union xnsched_policy_param *p)
00398 {
00399 thread->sched_class->sched_getparam(thread, p);
00400 }
00401
00402 static inline void xnsched_trackprio(struct xnthread *thread,
00403 const union xnsched_policy_param *p)
00404 {
00405 thread->sched_class->sched_trackprio(thread, p);
00406 }
00407
00408 static inline void xnsched_forget(struct xnthread *thread)
00409 {
00410 struct xnsched_class *sched_class = thread->base_class;
00411
00412 --sched_class->nthreads;
00413
00414 if (sched_class->sched_forget)
00415 sched_class->sched_forget(thread);
00416 }
00417
00418 #ifdef CONFIG_XENO_OPT_PRIOCPL
00419
00420 static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
00421 struct xnthread *thread)
00422 {
00423 return thread->sched_class->sched_push_rpi(sched, thread);
00424 }
00425
00426 static inline void xnsched_pop_rpi(struct xnthread *thread)
00427 {
00428 thread->sched_class->sched_pop_rpi(thread);
00429 }
00430
00431 static inline void xnsched_suspend_rpi(struct xnthread *thread)
00432 {
00433 struct xnsched_class *sched_class = thread->sched_class;
00434
00435 if (sched_class->sched_suspend_rpi)
00436 sched_class->sched_suspend_rpi(thread);
00437 }
00438
00439 static inline void xnsched_resume_rpi(struct xnthread *thread)
00440 {
00441 struct xnsched_class *sched_class = thread->sched_class;
00442
00443 if (sched_class->sched_resume_rpi)
00444 sched_class->sched_resume_rpi(thread);
00445 }
00446
00447 #endif
00448
00449 #else
00450
00451
00452
00453
00454
00455
00456 static inline void xnsched_enqueue(struct xnthread *thread)
00457 {
00458 struct xnsched_class *sched_class = thread->sched_class;
00459
00460 if (sched_class != &xnsched_class_idle)
00461 __xnsched_rt_enqueue(thread);
00462 }
00463
00464 static inline void xnsched_dequeue(struct xnthread *thread)
00465 {
00466 struct xnsched_class *sched_class = thread->sched_class;
00467
00468 if (sched_class != &xnsched_class_idle)
00469 __xnsched_rt_dequeue(thread);
00470 }
00471
00472 static inline void xnsched_requeue(struct xnthread *thread)
00473 {
00474 struct xnsched_class *sched_class = thread->sched_class;
00475
00476 if (sched_class != &xnsched_class_idle)
00477 __xnsched_rt_requeue(thread);
00478 }
00479
/* Single-class build: no class weighting, the weighted priority is
   just the raw base/current priority. */
static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
	return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
	return thread->cprio;
}
00489
00490 static inline void xnsched_setparam(struct xnthread *thread,
00491 const union xnsched_policy_param *p)
00492 {
00493 struct xnsched_class *sched_class = thread->sched_class;
00494
00495 if (sched_class != &xnsched_class_idle)
00496 __xnsched_rt_setparam(thread, p);
00497 else
00498 __xnsched_idle_setparam(thread, p);
00499 }
00500
00501 static inline void xnsched_getparam(struct xnthread *thread,
00502 union xnsched_policy_param *p)
00503 {
00504 struct xnsched_class *sched_class = thread->sched_class;
00505
00506 if (sched_class != &xnsched_class_idle)
00507 __xnsched_rt_getparam(thread, p);
00508 else
00509 __xnsched_idle_getparam(thread, p);
00510 }
00511
00512 static inline void xnsched_trackprio(struct xnthread *thread,
00513 const union xnsched_policy_param *p)
00514 {
00515 struct xnsched_class *sched_class = thread->sched_class;
00516
00517 if (sched_class != &xnsched_class_idle)
00518 __xnsched_rt_trackprio(thread, p);
00519 else
00520 __xnsched_idle_trackprio(thread, p);
00521 }
00522
00523 static inline void xnsched_forget(struct xnthread *thread)
00524 {
00525 --thread->base_class->nthreads;
00526 __xnsched_rt_forget(thread);
00527 }
00528
00529 #ifdef CONFIG_XENO_OPT_PRIOCPL
00530
/* Priority coupling (RPI) hooks, single-class build: forward
   directly to the rt class helpers. */
static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
						struct xnthread *thread)
{
	return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
	__xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
	__xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
	__xnsched_rt_resume_rpi(thread);
}
00551
00552 #endif
00553
00554 #endif
00555
00556 void xnsched_renice_root(struct xnsched *sched,
00557 struct xnthread *target);
00558
00559 struct xnthread *xnsched_peek_rpi(struct xnsched *sched);
00560
00561 #else
00562
00563 #include <nucleus/sched-idle.h>
00564 #include <nucleus/sched-rt.h>
00565
00566 #endif
00567
00570 #endif