#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on both 32-bit and 64-bit
 * hosts, we provide a synchronization point that is a no-op on 64-bit or UP
 * kernels.
 *
 * Key points:
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost, blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable():
 *    3.1) The write side must not sleep.
 *    3.2) The write side must not allow preemption.
 *    3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent with each other (remember point 1: this is a
 *    no-op on 64-bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better to not
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If the counters might be written from interrupt context, readers should
 *    block interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already held, or
 * per-cpu data is used in a non-preemptible context):
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->bytes64 += len;	// non atomic operation
 *	stats->packets64++;	// non atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = stats->bytes64;	// non atomic operation
 *		tpackets = stats->packets64;	// non atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example using per-cpu containers in a
 * BH-disabled context.
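 *
 * If the counters can also be updated from irq context (points 6 and 7), a
 * reader can use the _irq variants instead; a minimal sketch, reusing the
 * illustrative "stats" layout from the templates above:
 *
 *	u64 tbytes;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tbytes = stats->bytes64;	// non atomic operation
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));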
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	raw_write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
 * In case irq handlers can update u64 counters, readers can use the
 * following helpers:
 * - SMP 32-bit arches use seqcount protection, irq safe.
 * - UP 32-bit must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */
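
/*
 * A minimal sketch of the loopback-style pattern mentioned in the header
 * comment: per-cpu counters updated in BH-disabled context and folded into
 * 64-bit totals by a reader. The struct and field names here (pcpu_lstats,
 * dev->lstats, len) are illustrative assumptions, not taken verbatim from
 * drivers/net/loopback.c.
 *
 *	struct pcpu_lstats {
 *		u64			bytes;
 *		u64			packets;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	// Writer, with BH already disabled (e.g. in ndo_start_xmit()):
 *	struct pcpu_lstats *lb_stats = this_cpu_ptr(dev->lstats);
 *
 *	u64_stats_update_begin(&lb_stats->syncp);
 *	lb_stats->bytes += len;
 *	lb_stats->packets++;
 *	u64_stats_update_end(&lb_stats->syncp);
 *
 *	// Reader, folding all cpus; may sleep or be preempted between cpus:
 *	u64 tbytes = 0, tpackets = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct pcpu_lstats *lb_stats = per_cpu_ptr(dev->lstats, cpu);
 *		u64 bytes, packets;
 *		unsigned int start;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&lb_stats->syncp);
 *			bytes = lb_stats->bytes;
 *			packets = lb_stats->packets;
 *		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
 *
 *		tbytes += bytes;
 *		tpackets += packets;
 *	}
 */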