Green shading in the line-number column means the source is part of the translation unit; red means it is conditionally excluded. Highlighted line numbers link to the translation-unit page, and highlighted macros link to the macro page.
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 *
 * Native (bare-metal) primitives for reading, writing and masking the
 * local-CPU interrupt state via EFLAGS, CLI/STI and HLT.  When
 * CONFIG_PARAVIRT is set these are replaced by the hooks pulled in from
 * <asm/paravirt.h> below; otherwise the arch_local_* wrappers map
 * straight onto them.
 */

/* Read the current EFLAGS value: pushf stores it, pop retrieves it. */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

/* Write @flags back into EFLAGS: push the value, popf loads it. */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}

/* Mask maskable interrupts (cli clears EFLAGS.IF). */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

/* Unmask maskable interrupts (sti sets EFLAGS.IF). */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

/*
 * Enable interrupts and halt atomically: "sti" only takes effect after
 * the following instruction completes, so no interrupt can slip in
 * between the enable and the "hlt" -- hence "safe".
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

/* Halt the CPU without touching the interrupt flag. */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

/*
 * Non-paravirt C interface: thin wrappers around the native_*
 * primitives above.  notrace keeps the function tracer out of these
 * low-level paths.
 */

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc: save the current interrupt state, then disable
 * interrupts.  The caller hands the returned flags back to
 * arch_local_irq_restore() to undo this.
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else
/* __ASSEMBLY__ && !CONFIG_PARAVIRT: plain-instruction macros for entry code. */

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack). So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/* */

#define INTERRUPT_RETURN	iretq
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
/* Nonzero if @flags (a saved EFLAGS value) has interrupts masked (IF clear). */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

/* Nonzero if interrupts are currently masked on this CPU. */
static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

#else
/* Assembly-only lockdep / irq-tracing helpers used by the entry code. */

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT \
	pushl %eax; \
	pushl %ecx; \
	pushl %edx; \
	call lockdep_sys_exit; \
	popl %edx; \
	popl %ecx; \
	popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */