File: /Users/paulross/dev/linux/linux-3.13/arch/x86/include/asm/preempt.h


#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED    (0 + PREEMPT_NEED_RESCHED)
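/*
 * Worked example of the inversion (a sketch; values assume
 * PREEMPT_NEED_RESCHED is the MSB, 0x80000000, as defined in
 * <linux/preempt.h> in this tree):
 *
 *   0x80000001  preempt_disable()d, no resched needed
 *   0x80000000  preemptible, no resched needed (PREEMPT_ENABLED)
 *   0x00000001  preempt_disable()d, resched needed
 *   0x00000000  preemptible and resched needed
 *
 * Only the third state decrements to zero, which is exactly the one
 * case where we both can and should reschedule.
 */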

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
    return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
    __this_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
    (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
    task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
    task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
    per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */
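/*
 * Illustrative sketch of the payoff (not verbatim compiler output):
 * with the fold, a CONFIG_PREEMPT preempt_enable() can compile down to
 * roughly
 *
 *     decl %gs:__preempt_count
 *     jnz  1f
 *     call ___preempt_schedule
 * 1:
 *
 * one decrement whose zero flag answers "may we preempt?" and "should
 * we reschedule?" at the same time.
 */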

static __always_inline void set_preempt_need_resched(void)
{
    __this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
    __this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
    return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
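
/*
 * Note the inversion above: set_preempt_need_resched() *clears* the
 * stored bit and clear_preempt_need_resched() *sets* it, because the
 * bit kept in __preempt_count is the inverse of NEED_RESCHED;
 * test_preempt_need_resched() undoes the inversion again with '!'.
 */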

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
    __this_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
    __this_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
 * reschedule, a decrement which hits zero means we have no preempt_count
 * and should reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
    GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}
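
/*
 * Sketch of the expansion (simplified from the asm-goto variant of
 * GEN_UNARY_RMWcc in <asm/rmwcc.h>; operand syntax approximate):
 *
 *     asm goto ("decl %%gs:%0; je %l[cc_label]"
 *             : : "m" (__preempt_count) : "memory" : cc_label);
 *     return 0;
 * cc_label:
 *     return 1;
 *
 * a single decl whose zero/equal condition drives the branch directly,
 * with no separate compare or flag read.
 */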

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
    return unlikely(!__this_cpu_read_4(__preempt_count));
}

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
    extern asmlinkage void ___preempt_schedule_context(void);
#   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
# endif
#endif

#endif /* __ASM_PREEMPT_H */
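
The fold can be modeled outside the kernel. The following is a minimal
user-space sketch (illustrative only: plain arithmetic on an ordinary
unsigned int stands in for the per-cpu variable and the asm, and all
demo_* names are invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* Inverted NEED_RESCHED bit in the MSB, as with PREEMPT_NEED_RESCHED. */
#define DEMO_NEED_RESCHED 0x80000000u
#define DEMO_ENABLED      (0u + DEMO_NEED_RESCHED)

static unsigned int demo_count = DEMO_ENABLED;

/* Models preempt_disable(): bump the nesting count. */
static void demo_preempt_disable(void)
{
    demo_count++;
}

/* Models set_preempt_need_resched(): clearing the inverted bit marks
 * "reschedule needed". */
static void demo_set_need_resched(void)
{
    demo_count &= ~DEMO_NEED_RESCHED;
}

/* Models __preempt_count_dec_and_test(): one decrement; zero means
 * "preemptible and resched needed". */
static bool demo_dec_and_test(void)
{
    return --demo_count == 0;
}

int main(void)
{
    demo_preempt_disable();      /* 0x80000001: disabled, no resched */
    demo_set_need_resched();     /* 0x00000001: disabled, resched needed */
    if (demo_dec_and_test())     /* 0x00000000: both conditions met */
        printf("would reschedule here\n");
    return 0;
}

Compiled with any C compiler, this prints the message only because the
single decrement both dropped the nesting count to zero and found the
inverted bit already cleared, mirroring the kernel's one-instruction test.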