1
2#ifndef _LINUX_KERNEL_H
3#define _LINUX_KERNEL_H
4
5
6#include <stdarg.h>
7#include <linux/limits.h>
8#include <linux/linkage.h>
9#include <linux/stddef.h>
10#include <linux/types.h>
11#include <linux/compiler.h>
12#include <linux/bitops.h>
13#include <linux/log2.h>
14#include <linux/typecheck.h>
15#include <linux/printk.h>
16#include <linux/build_bug.h>
17#include <asm/byteorder.h>
18#include <asm/div64.h>
19#include <uapi/linux/kernel.h>
20#include <asm/div64.h>
21
22#define STACK_MAGIC 0xdeadbeef
23
24
25
26
27
28
29
/* Replicate the byte value @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* @a is a power of 2: round @x up (ALIGN) or down (ALIGN_DOWN) to a multiple of it. */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
/* Align a pointer value upward; the result keeps the pointer's type. */
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
/* True when @x is a multiple of @a (@a must be a power of 2). */
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
38
39
40#define READ 0
41#define WRITE 1
42
43
44
45
46
/*
 * Number of elements in a statically-declared array.  __must_be_array()
 * forces a build error when a pointer is passed instead of a real array.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/* Convert a u64 (type checked at compile time) into a __user pointer. */
#define u64_to_user_ptr(x) ( \
{ \
 typecheck(u64, (x)); \
 (void __user *)(uintptr_t)(x); \
} \
)
55
56
57
58
59
60
61
/* Mask of the low bits for rounding to @y; @y must be a power of 2. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))

/*
 * round_up()/round_down() - round @x up/down to a multiple of @y, where
 * @y is a power of 2.  The result has the type of @x.
 */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof() of member @f of struct type @t, without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

/* Type of member @m of aggregate type @T. */
#define typeof_member(T, m) typeof(((T*)0)->m)

/* Integer division with the quotient rounded up. */
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

/*
 * 64-bit divisions routed through do_div(), so 32-bit architectures do
 * not pull in libgcc's full 64/64 division helpers.
 */
#define DIV_ROUND_DOWN_ULL(ll, d) \
 ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

#define DIV_ROUND_UP_ULL(ll, d) \
 DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))

/* sector_t arithmetic needs the 64-bit path on 32-bit kernels. */
#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
106
107
108
109
110
111
112
113
114
/*
 * roundup() - round @x up to a multiple of @y.  Works for any @y (not
 * just powers of 2) but uses a division; prefer round_up() when @y is a
 * power of 2.  @y is evaluated only once.
 */
#define roundup(x, y) ( \
{ \
 typeof(y) __y = y; \
 (((x) + (__y - 1)) / __y) * __y; \
} \
)

/* rounddown() - round @x down to a multiple of @y; @x is evaluated once. */
#define rounddown(x, y) ( \
{ \
 typeof(x) __x = (x); \
 __x - (__x % (y)); \
} \
)

/*
 * DIV_ROUND_CLOSEST() - divide rounding to the nearest integer.  The
 * type/sign tests pick the direction of the half-divisor offset: add
 * __d/2 when either operand type is unsigned or both values have the
 * same sign, subtract it when the quotient would be negative.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
 typeof(x) __x = x; \
 typeof(divisor) __d = divisor; \
 (((typeof(x))-1) > 0 || \
 ((typeof(divisor))-1) > 0 || \
 (((__x) > 0) == ((__d) > 0))) ? \
 (((__x) + ((__d) / 2)) / (__d)) : \
 (((__x) - ((__d) / 2)) / (__d)); \
} \
)

/* 64-bit nearest-rounding division via do_div() (unsigned arithmetic). */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
 typeof(divisor) __d = divisor; \
 unsigned long long _tmp = (x) + (__d) / 2; \
 do_div(_tmp, __d); \
 _tmp; \
} \
)

/*
 * mult_frac() - compute x * numer / denom while limiting intermediate
 * overflow: x is split into quotient and remainder by denom before the
 * multiplication.
 */
#define mult_frac(x, numer, denom)( \
{ \
 typeof(x) quot = (x) / (denom); \
 typeof(x) rem = (x) % (denom); \
 (quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
177
178
/* Caller's return address / current code address, as unsigned long. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#define sector_div(a, b) do_div(a, b)

/*
 * upper_32_bits() - high 32 bits of a value.  The two shifts by 16
 * avoid a ">> 32" that would be undefined (and warned about) when @n is
 * only 32 bits wide.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/* lower_32_bits() - low 32 bits of a value. */
#define lower_32_bits(n) ((u32)(n))
199
struct completion;
struct pt_regs;
struct user;

/* Voluntary preemption point; compiles away unless CONFIG_PREEMPT_VOLUNTARY. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void ___might_sleep(const char *file, int line, int preempt_offset);
extern void __might_sleep(const char *file, int line, int preempt_offset);
extern void __cant_sleep(const char *file, int line, int preempt_offset);

/*
 * might_sleep() - annotate that the caller may sleep.  With
 * CONFIG_DEBUG_ATOMIC_SLEEP the annotation is checked at run time
 * (__might_sleep reports the file/line of the offender); it also serves
 * as a might_resched() preemption point.
 */
# define might_sleep() \
 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)

/* cant_sleep() - annotate that this context must never sleep. */
# define cant_sleep() \
 do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 /* Debug checks disabled: keep only the preemption point. */
 static inline void ___might_sleep(const char *file, int line,
 int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
 int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form of might_sleep(); @cond is evaluated exactly once. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
247
248
249
250
251
252
253
254
255
/*
 * abs() - absolute value, returned in the signed type matching the
 * argument's width.  Handles signed and unsigned char through long
 * long; any other type yields a (void)0 expression, which fails to
 * compile if the result is used.
 */
#define abs(x) __abs_choose_expr(x, long long, \
 __abs_choose_expr(x, long, \
 __abs_choose_expr(x, int, \
 __abs_choose_expr(x, short, \
 __abs_choose_expr(x, char, \
 __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), char), \
 (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
 ((void)0)))))))

/* Helper: if @x is signed/unsigned 'type', compute |x| as signed 'type'. */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), signed type) || \
 __builtin_types_compatible_p(typeof(x), unsigned type), \
 ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
/*
 * reciprocal_scale - map @val into the interval [0, @ep_ro)
 * @val: input value (full u32 range)
 * @ep_ro: right-open upper bound of the target interval
 *
 * Computes (val * ep_ro) / 2^32 as a cheap multiplicative alternative
 * to "val % ep_ro".  Note the mapping is proportional, not a modulus:
 * it is only evenly spread when @val covers the whole u32 range.
 */
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
 return (u32)(((u64) val * ep_ro) >> 32);
}
289
/*
 * might_fault() - annotation for code paths that may fault on user
 * memory; backed by a real check only with an MMU plus locking/sleep
 * debugging, otherwise a no-op.
 */
#if defined(CONFIG_MMU) && \
 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
297
298extern struct atomic_notifier_head panic_notifier_list;
299extern long (*panic_blink)(int state);
300__printf(1, 2)
301void panic(const char *fmt, ...) __noreturn __cold;
302void nmi_panic(struct pt_regs *regs, const char *msg);
303extern void oops_enter(void);
304extern void oops_exit(void);
305void print_oops_end_marker(void);
306extern int oops_may_print(void);
307void do_exit(long error_code) __noreturn;
308void complete_and_exit(struct completion *, long) __noreturn;
309
/*
 * refcount_error_report() - arch-provided hook for reporting refcount
 * errors (takes the faulting registers and a message); a no-op stub
 * when the architecture lacks CONFIG_ARCH_HAS_REFCOUNT.
 */
#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err);
#else
static inline void refcount_error_report(struct pt_regs *regs, const char *err)
{ }
#endif
316
317
318int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
319int __must_check _kstrtol(const char *s, unsigned int base, long *res);
320
321int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
322int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
/*
 * kstrtoul - convert a string to an unsigned long
 * @s: input string
 * @base: number base, passed through to the underlying parser
 * @res: where the parsed value is stored on success
 *
 * Returns the underlying parser's result (0 on success, negative errno
 * on failure, by kernel convention).  When unsigned long and unsigned
 * long long have identical size and alignment, the call forwards to
 * kstrtoull() so only one full parser is needed; the pointer cast is
 * safe precisely because those compile-time conditions hold.
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
 if (sizeof(unsigned long) == sizeof(unsigned long long) &&
 __alignof__(unsigned long) == __alignof__(unsigned long long))
 return kstrtoull(s, base, (unsigned long long *)res);
 else
 return _kstrtoul(s, base, res);
}
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
/*
 * kstrtol - convert a string to a long
 * @s: input string
 * @base: number base, passed through to the underlying parser
 * @res: where the parsed value is stored on success
 *
 * Signed counterpart of kstrtoul(): forwards to kstrtoll() when long
 * and long long share size and alignment (making the pointer cast
 * valid), otherwise uses the dedicated long parser.
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
 if (sizeof(long) == sizeof(long long) &&
 __alignof__(long) == __alignof__(long long))
 return kstrtoll(s, base, (long long *)res);
 else
 return _kstrtol(s, base, res);
}
381
382int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
383int __must_check kstrtoint(const char *s, unsigned int base, int *res);
384
/*
 * Fixed-width wrappers: u64/s64 are layout-compatible with unsigned
 * long long/long long and u32/s32 with unsigned int/int (the direct
 * pointer hand-off below compiles only because of that), so each simply
 * forwards to the native-width parser and returns its result.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
 return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
 return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
 return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
 return kstrtoint(s, base, res);
}
404
405int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
406int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
407int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
408int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
409int __must_check kstrtobool(const char *s, bool *res);
410
411int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
412int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
413int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
414int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
415int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
416int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
417int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
418int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
419int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
420int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
421int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
422
/*
 * Fixed-width *_from_user wrappers: same aliasing as the kstrtou64()
 * family above, forwarding to the native-width user-copy parsers.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
 return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
 return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
 return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
 return kstrtoint_from_user(s, count, base, res);
}
442
443
444
445extern unsigned long simple_strtoul(const char *,char **,unsigned int);
446extern long simple_strtol(const char *,char **,unsigned int);
447extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
448extern long long simple_strtoll(const char *,char **,unsigned int);
449
450extern int num_to_str(char *buf, int size,
451 unsigned long long num, unsigned int width);
452
453
454
455extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
456extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
457extern __printf(3, 4)
458int snprintf(char *buf, size_t size, const char *fmt, ...);
459extern __printf(3, 0)
460int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
461extern __printf(3, 4)
462int scnprintf(char *buf, size_t size, const char *fmt, ...);
463extern __printf(3, 0)
464int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
465extern __printf(2, 3) __malloc
466char *kasprintf(gfp_t gfp, const char *fmt, ...);
467extern __printf(2, 0) __malloc
468char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
469extern __printf(2, 0)
470const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
471
472extern __scanf(2, 3)
473int sscanf(const char *, const char *, ...);
474extern __scanf(2, 0)
475int vsscanf(const char *, const char *, va_list);
476
477extern int get_option(char **str, int *pint);
478extern char *get_options(const char *str, int nints, int *ints);
479extern unsigned long long memparse(const char *ptr, char **retptr);
480extern bool parse_option_str(const char *str, const char *option);
481extern char *next_arg(char *args, char **param, char **val);
482
483extern int core_kernel_text(unsigned long addr);
484extern int init_kernel_text(unsigned long addr);
485extern int core_kernel_data(unsigned long addr);
486extern int __kernel_text_address(unsigned long addr);
487extern int kernel_text_address(unsigned long addr);
488extern int func_ptr_is_kernel_text(void *ptr);
489
490u64 int_pow(u64 base, unsigned int exp);
491unsigned long int_sqrt(unsigned long);
492
#if BITS_PER_LONG < 64
/* 32-bit kernels need a dedicated 64-bit integer square root. */
u32 int_sqrt64(u64 x);
#else
/* On 64-bit, unsigned long can hold a u64, so reuse int_sqrt(). */
static inline u32 int_sqrt64(u64 x)
{
 return (u32)int_sqrt(x);
}
#endif
501
502extern void bust_spinlocks(int yes);
503extern int oops_in_progress;
504extern int panic_timeout;
505extern unsigned long panic_print;
506extern int panic_on_oops;
507extern int panic_on_unrecovered_nmi;
508extern int panic_on_io_nmi;
509extern int panic_on_warn;
510extern int sysctl_panic_on_rcu_stall;
511extern int sysctl_panic_on_stackoverflow;
512
513extern bool crash_kexec_post_notifiers;
514
515
516
517
518
519
520extern atomic_t panic_cpu;
521#define PANIC_CPU_INVALID -1
522
523
524
525
526
/*
 * set_arch_panic_timeout - install an architecture's panic_timeout
 * default without clobbering a value changed elsewhere: the global is
 * only updated while it still equals @arch_default_timeout.
 */
static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
 if (panic_timeout == arch_default_timeout)
 panic_timeout = timeout;
}
532extern const char *print_tainted(void);
533enum lockdep_ok {
534 LOCKDEP_STILL_OK,
535 LOCKDEP_NOW_UNRELIABLE
536};
537extern void add_taint(unsigned flag, enum lockdep_ok);
538extern int test_taint(unsigned flag);
539extern unsigned long get_taint(void);
540extern int root_mountflags;
541
542extern bool early_boot_irqs_disabled;
543
544
545
546
547
/*
 * Coarse global system lifecycle state.  Values are declared in
 * boot-to-shutdown order; NOTE(review): callers elsewhere presumably
 * compare with < / >, so keep new states in sequence — confirm before
 * reordering.
 */
extern enum system_states {
 SYSTEM_BOOTING,
 SYSTEM_SCHEDULING,
 SYSTEM_RUNNING,
 SYSTEM_HALT,
 SYSTEM_POWER_OFF,
 SYSTEM_RESTART,
 SYSTEM_SUSPEND,
} system_state;
557
558
/* Kernel taint flag bit numbers (used with add_taint()/test_taint()). */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
/* Must stay one past the highest bit number above. */
#define TAINT_FLAGS_COUNT 18

/*
 * Per-flag display info: c_true/c_false are the characters for the
 * set/clear states (per the field names); 'module' marks flags that
 * also apply to individual modules.
 */
struct taint_flag {
 char c_true;
 char c_false;
 bool module;
};

extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
586
/* Lower-case hex digit table; _lo/_hi pick the digit for each nibble. */
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
590
591static inline char *hex_byte_pack(char *buf, u8 byte)
592{
593 *buf++ = hex_asc_hi(byte);
594 *buf++ = hex_asc_lo(byte);
595 return buf;
596}
597
/* Upper-case hex digit table; _lo/_hi pick the digit for each nibble. */
extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
601
602static inline char *hex_byte_pack_upper(char *buf, u8 byte)
603{
604 *buf++ = hex_asc_upper_hi(byte);
605 *buf++ = hex_asc_upper_lo(byte);
606 return buf;
607}
608
609extern int hex_to_bin(char ch);
610extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
611extern char *bin2hex(char *dst, const void *src, size_t count);
612
613bool mac_pton(const char *s, u8 *mac);
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
/* Selects how much buffer content ftrace_dump() emits. */
enum ftrace_dump_mode {
 DUMP_NONE,
 DUMP_ALL,
 DUMP_ORIG,
};
640
641#ifdef CONFIG_TRACING
642void tracing_on(void);
643void tracing_off(void);
644int tracing_is_on(void);
645void tracing_snapshot(void);
646void tracing_snapshot_alloc(void);
647
648extern void tracing_start(void);
649extern void tracing_stop(void);
650
/*
 * Never called at run time.  It exists only so the compiler applies
 * printf-style format checking (__printf(1, 2)) to trace_printk()
 * arguments via the dead "if (0)" call in the macro below.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
 if (0) \
 ____trace_printk_check_format(fmt, ##args); \
} while (0)
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
/*
 * trace_printk() - printf into the ftrace buffer.  Stringifying
 * "(__VA_ARGS__)" yields "()" (sizeof 3, including the NUL) when no
 * arguments were passed, so argument-free calls are routed to the
 * cheaper trace_puts() path.
 */
#define trace_printk(fmt, ...) \
do { \
 char _______STR[] = __stringify((__VA_ARGS__)); \
 if (sizeof(_______STR) > 3) \
 do_trace_printk(fmt, ##__VA_ARGS__); \
 else \
 trace_puts(fmt); \
} while (0)

/*
 * Compile-time-constant format strings get their address placed in the
 * dedicated __trace_printk_fmt section, and only that pointer plus the
 * binary arguments are recorded (__trace_bprintk); formats built at
 * run time fall back to the full __trace_printk().
 */
#define do_trace_printk(fmt, args...) \
do { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __trace_printk_check_format(fmt, ##args); \
 \
 if (__builtin_constant_p(fmt)) \
 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
 else \
 __trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
713
714extern __printf(2, 3)
715int __trace_bprintk(unsigned long ip, const char *fmt, ...);
716
717extern __printf(2, 3)
718int __trace_printk(unsigned long ip, const char *fmt, ...);
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
/*
 * trace_puts() - write a plain string to the trace buffer.  Constant
 * strings record just a pointer into the __trace_printk_fmt section
 * (__trace_bputs); dynamic strings are copied with their strlen()
 * (__trace_puts).  Evaluates to the chosen helper's int result.
 */
#define trace_puts(str) ({ \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(str) ? str : NULL; \
 \
 if (__builtin_constant_p(str)) \
 __trace_bputs(_THIS_IP_, trace_printk_fmt); \
 else \
 __trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
757
758extern void trace_dump_stack(int skip);
759
760
761
762
763
764
/*
 * ftrace_vprintk() - va_list variant of trace_printk().  Mirrors
 * do_trace_printk(): constant formats take the binary path with a
 * pointer stored in the __trace_printk_fmt section, others the full
 * vprintf path.
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
 if (__builtin_constant_p(fmt)) { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
 } else \
 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
776
777extern __printf(2, 0) int
778__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
779
780extern __printf(2, 0) int
781__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
782
783extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
784#else
/* !CONFIG_TRACING: every tracing facility compiles away to a no-op. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

/* Keep printf format checking even when tracing is compiled out. */
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
 return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
 return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
806#endif
807
808
809
810
811
812
813
814
815
816
817
818
819
/*
 * min()/max() plumbing.
 *
 * __typecheck(): evaluates to 1, but comparing the two casted pointers
 * makes the compiler warn when x and y have incompatible types.
 */
#define __typecheck(x, y) \
 (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))

/*
 * __is_constexpr(): true only for integer constant expressions.
 * ((long)(x) * 0l) is a null pointer constant exactly when x is
 * constant, which flips the ?: result type between (void *) and
 * (int *), changing the sizeof comparison.
 */
#define __is_constexpr(x) \
 (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

#define __no_side_effects(x, y) \
 (__is_constexpr(x) && __is_constexpr(y))

#define __safe_cmp(x, y) \
 (__typecheck(x, y) && __no_side_effects(x, y))

/* Plain comparison; evaluates each argument twice. */
#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))

/* Single-evaluation comparison via uniquely-named temporaries. */
#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
 typeof(x) unique_x = (x); \
 typeof(y) unique_y = (y); \
 __cmp(unique_x, unique_y, op); })

/*
 * Use the double-evaluating __cmp() (which stays a constant expression)
 * only when types match and both sides are constants; otherwise
 * evaluate each argument exactly once.
 */
#define __careful_cmp(x, y, op) \
 __builtin_choose_expr(__safe_cmp(x, y), \
 __cmp(x, y, op), \
 __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
848
849
850
851
852
853
/* min() - the smaller of two values of compatible type. */
#define min(x, y) __careful_cmp(x, y, <)

/* max() - the larger of two values of compatible type. */
#define max(x, y) __careful_cmp(x, y, >)

/* min3()/max3() - minimum/maximum of three values, in x's type. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)

#define max3(x, y, z) max((typeof(x))max(x, y), z)

/*
 * min_not_zero() - the smaller of two values, ignoring a zero operand;
 * yields 0 only when both are zero.
 */
#define min_not_zero(x, y) ({ \
 typeof(x) __x = (x); \
 typeof(y) __y = (y); \
 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/*
 * clamp() - restrict @val to the range [@lo, @hi], comparing in @val's
 * type.  Requires lo <= hi for a meaningful result.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
899
900
901
902
903
904
905
906
907
908
909
910
911
912
/*
 * min_t()/max_t() - min/max with both arguments cast to an explicit
 * type first; use when the operands' types (e.g. signedness) differ.
 */
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)

#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)

/* clamp_t() - clamp with all comparisons performed in the given type. */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/*
 * clamp_val() - clamp in @val's own type; differs from clamp() when
 * @lo/@hi would otherwise impose a wider type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/*
 * swap() - exchange two lvalues of the same type through a temporary.
 * Each argument is evaluated more than once; avoid side effects.
 */
#define swap(a, b) \
 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
956
957
/* COUNT_ARGS() - number of arguments (0..12) given to a variadic macro. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* Token-paste two expanded arguments. */
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
963
964
965
966
967
968
969
970
/*
 * container_of - obtain the address of the structure containing a member
 * @ptr: pointer to the member
 * @type: type of the containing structure
 * @member: name of the member within @type
 *
 * The BUILD_BUG_ON_MSG rejects any @ptr whose pointed-to type matches
 * neither the member nor void, catching mismatched @ptr/@member pairs
 * at compile time.
 */
#define container_of(ptr, type, member) ({ \
 void *__mptr = (void *)(ptr); \
 BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
 !__same_type(*(ptr), void), \
 "pointer type mismatch in container_of()"); \
 ((type *)(__mptr - offsetof(type, member))); })

/*
 * container_of_safe - container_of() that passes NULL / ERR_PTR values
 * through (via ERR_CAST) instead of applying the member offset to them.
 */
#define container_of_safe(ptr, type, member) ({ \
 void *__mptr = (void *)(ptr); \
 BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
 !__same_type(*(ptr), void), \
 "pointer type mismatch in container_of()"); \
 IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
 ((type *)(__mptr - offsetof(type, member))); })
993
994
995#ifdef CONFIG_FTRACE_MCOUNT_RECORD
996# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
997#endif
998
999
/*
 * VERIFY_OCTAL_PERMISSIONS() - build-time sanity checks on a file mode:
 * it must lie in [0, 0777], read permission must not increase from user
 * to group to other, group write must not exceed user write, and other
 * write (bit 2) is never allowed.  Evaluates to @perms unchanged.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
 (BUILD_BUG_ON_ZERO((perms) < 0) + \
 BUILD_BUG_ON_ZERO((perms) > 0777) + \
 \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
 \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
 \
 BUILD_BUG_ON_ZERO((perms) & 2) + \
 (perms))
1011#endif
1012