/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
extern void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch. This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
        rcu_note_context_switch(cpu);
}

extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);

void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));

/**
 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
 *
 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code. In fact,
 * if you are using synchronize_rcu_bh_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu_bh() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier. Failing to observe
 * these restrictions will result in deadlock.
 */
static inline void synchronize_rcu_bh_expedited(void)
{
        synchronize_sched_expedited();
}

extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);

extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);

extern void exit_rcu(void);

extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;

extern bool rcu_is_watching(void);

#endif /* __LINUX_RCUTREE_H */
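The block comment above synchronize_rcu_bh_expedited() advises batching: rather than forcing an expedited grace period on every iteration of an update loop, publish all of the updates first and then wait once with synchronize_rcu_bh(). The sketch below is not part of rcutree.h; the my_elem type, my_table array, my_lock spinlock, and both function names are hypothetical placeholders used only to illustrate the pattern.

/*
 * Illustrative sketch only: batching updates so that a single RCU-bh
 * grace period covers all of them, instead of expediting one grace
 * period per update.  All names here are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MY_TABLE_SIZE 16

struct my_elem {
        unsigned long data;
};

static struct my_elem __rcu *my_table[MY_TABLE_SIZE];
static DEFINE_SPINLOCK(my_lock);

/* Discouraged: one expedited grace period per replaced slot. */
static void replace_all_expedited(struct my_elem *newvals[])
{
        struct my_elem *old;
        int i;

        for (i = 0; i < MY_TABLE_SIZE; i++) {
                spin_lock(&my_lock);
                old = rcu_dereference_protected(my_table[i],
                                                lockdep_is_held(&my_lock));
                rcu_assign_pointer(my_table[i], newvals[i]);
                spin_unlock(&my_lock);
                synchronize_rcu_bh_expedited(); /* hammers all CPUs each pass */
                kfree(old);
        }
}

/* Preferred: publish every update, then wait for one grace period. */
static void replace_all_batched(struct my_elem *newvals[])
{
        struct my_elem *old[MY_TABLE_SIZE];
        int i;

        spin_lock(&my_lock);
        for (i = 0; i < MY_TABLE_SIZE; i++) {
                old[i] = rcu_dereference_protected(my_table[i],
                                                   lockdep_is_held(&my_lock));
                rcu_assign_pointer(my_table[i], newvals[i]);
        }
        spin_unlock(&my_lock);

        synchronize_rcu_bh();           /* one grace period covers all slots */

        for (i = 0; i < MY_TABLE_SIZE; i++)
                kfree(old[i]);
}

Under these assumptions, the batched variant waits for a single grace period for the whole table, while the first variant disturbs every CPU once per slot.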