#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	struct list_head list;
	smp_call_func_t func;
	void *info;
	u16 flags;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);

void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */
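A minimal usage sketch follows, assuming a hypothetical kernel module built against this header. It shows the two common patterns the header exposes: on_each_cpu() to run a short callback on every online processor, and get_cpu()/put_cpu() around smp_call_function_single() to query one specific CPU while preemption is disabled. The per-CPU variable and the function names (example_counter, flush_local_counter, read_remote_counter, example_read) are illustrative and are not declared anywhere in smp.h.

/*
 * Usage sketch -- NOT part of smp.h. All names below are made up
 * for illustration only.
 */
#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

/* Runs on each CPU from IPI context (interrupts off): keep it short, never sleep. */
static void flush_local_counter(void *info)
{
	this_cpu_write(example_counter, 0);
}

/* Copies the executing CPU's counter into the caller-supplied buffer. */
static void read_remote_counter(void *info)
{
	*(unsigned long *)info = this_cpu_read(example_counter);
}

static unsigned long example_read(int target_cpu)
{
	unsigned long value = 0;

	/* Reset the counter on every online CPU; wait=1 blocks until all are done. */
	on_each_cpu(flush_local_counter, NULL, 1);

	/* get_cpu() disables preemption so the current CPU cannot change under us. */
	if (get_cpu() == target_cpu)
		read_remote_counter(&value);
	else
		smp_call_function_single(target_cpu, read_remote_counter,
					 &value, 1);
	put_cpu();

	return value;
}

Note that callbacks handed to these interfaces run from IPI context on the remote CPUs, so they must not sleep; the wait argument only controls whether the caller blocks until every callback has completed.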