1
2#ifndef _LINUX_KERNEL_H
3#define _LINUX_KERNEL_H
4
5
6#include <stdarg.h>
7#include <linux/linkage.h>
8#include <linux/stddef.h>
9#include <linux/types.h>
10#include <linux/compiler.h>
11#include <linux/bitops.h>
12#include <linux/log2.h>
13#include <linux/typecheck.h>
14#include <linux/printk.h>
15#include <linux/build_bug.h>
16#include <asm/byteorder.h>
17#include <uapi/linux/kernel.h>
18
/*
 * Limits for the machine-sized C integer types, expressed via the
 * kernel's fixed-width typedefs where a cast is needed to get the
 * right type out of the all-ones pattern.
 */
#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

/* Limits for the explicitly-sized u8/s8 .. u64/s64 types. */
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))

/* Magic marker value; presumably used as a stack canary — confirm at users. */
#define STACK_MAGIC 0xdeadbeef
47
48
49
50
51
52
53
/* Replicate the low byte of @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* Alignment helpers: @a must be a power of 2 (mask arithmetic relies on it). */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
62
63
64#define READ 0
65#define WRITE 1
66
67
68
69
70
/*
 * Number of elements in array @arr.  __must_be_array() forces a build
 * error when a pointer (not a true array) is passed by mistake.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
72
/*
 * u64_to_user_ptr - convert a u64 value to a __user pointer.
 *
 * typecheck() forces the argument to really be a u64, so callers can't
 * silently pass a narrower type.  The argument is parenthesized so that
 * an expression such as `a + b` is cast as a whole — without the parens,
 * `(uintptr_t)a + b` would bind the cast to `a` only.
 */
#define u64_to_user_ptr(x) ( \
{ \
	typecheck(u64, (x)); \
	(void __user *)(uintptr_t)(x); \
} \
)
79
80
81
82
83
84
85
/* Round @x up/down to a multiple of @y; @y must be a power of 2. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof() of member @f of struct type @t, without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

/*
 * 64-bit divide helpers built on do_div() so they work on 32-bit
 * architectures that have no native 64/64 division.
 */
#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))

/* sector_t may be 64-bit while long is 32-bit; pick the safe variant. */
#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
111
112
/*
 * roundup - round @x up to the next multiple of @y (any @y, not just
 * powers of 2).  @y is evaluated once via the local __y.
 */
#define roundup(x, y) ( \
{ \
	const typeof(y) __y = y; \
	(((x) + (__y - 1)) / __y) * __y; \
} \
)
/*
 * rounddown - round @x down to a multiple of @y.  @x is evaluated once;
 * note @y is evaluated twice here.
 */
#define rounddown(x, y) ( \
{ \
	typeof(x) __x = (x); \
	__x - (__x % (y)); \
} \
)
125
126
127
128
129
130
131
/*
 * DIV_ROUND_CLOSEST - divide rounding to nearest integer.
 *
 * The condition picks the "+d/2" form when either operand type is
 * unsigned or both values have the same sign; otherwise it uses the
 * "-d/2" form so rounding moves toward the correct direction for a
 * negative quotient.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
	typeof(x) __x = x; \
	typeof(divisor) __d = divisor; \
	(((typeof(x))-1) > 0 || \
	 ((typeof(divisor))-1) > 0 || \
	 (((__x) > 0) == ((__d) > 0))) ? \
		(((__x) + ((__d) / 2)) / (__d)) : \
		(((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * DIV_ROUND_CLOSEST_ULL - unsigned 64-bit round-to-nearest divide,
 * using do_div() for 32-bit architectures.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
	typeof(divisor) __d = divisor; \
	unsigned long long _tmp = (x) + (__d) / 2; \
	do_div(_tmp, __d); \
	_tmp; \
} \
)

/*
 * mult_frac - compute (@x * @numer) / @denom while reducing the chance
 * of intermediate overflow by splitting @x into quotient and remainder
 * parts first.  Assumes quot * numer does not itself overflow.
 */
#define mult_frac(x, numer, denom)( \
{ \
	typeof(x) quot = (x) / (denom); \
	typeof(x) rem = (x) % (denom); \
	(quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
167
168
/* Return address of the current function / address of the current point. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
/*
 * Without CONFIG_LBDAF plain C division is used; presumably sector_t is
 * narrow enough here that no 64-bit helper is needed — see sector_t's
 * definition.  Evaluates to the remainder, like do_div().
 */
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif

/*
 * upper_32_bits - return bits 32..63 of @n.  The double 16-bit shift
 * avoids a "shift count >= width of type" warning when @n is 32-bit.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/* lower_32_bits - return bits 0..31 of @n. */
#define lower_32_bits(n) ((u32)(n))
201
202struct completion;
203struct pt_regs;
204struct user;
205
/* Voluntary preemption: might_resched() is a scheduling point. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 void ___might_sleep(const char *file, int line, int preempt_offset);
 void __might_sleep(const char *file, int line, int preempt_offset);

/*
 * might_sleep - annotation for functions that can sleep.
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP this calls __might_sleep() with the
 * call site's file/line (presumably to warn when called from atomic
 * context — behavior lives in the out-of-line implementation), and is
 * also a voluntary preemption point via might_resched().
 */
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 static inline void ___might_sleep(const char *file, int line,
				   int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
				  int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form: only annotate/resched when @cond is true. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
239
240
241
242
243
244
245
246
247
/*
 * abs - return the absolute value of @x as the matching *signed* type.
 *
 * The chain of __abs_choose_expr() picks the first width whose signed or
 * unsigned variant matches typeof(x), so an unsigned argument is treated
 * as the signed type of the same width.  Plain char gets its own case
 * because it is a distinct type from both signed char and unsigned char.
 * Any other type makes the expression (void), which fails to compile in
 * a value context.
 */
#define abs(x) __abs_choose_expr(x, long long, \
		__abs_choose_expr(x, long, \
		__abs_choose_expr(x, int, \
		__abs_choose_expr(x, short, \
		__abs_choose_expr(x, char, \
		__builtin_choose_expr( \
			__builtin_types_compatible_p(typeof(x), char), \
			(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
			((void)0)))))))

/* One rung of the ladder: match signed or unsigned @type, else defer to @other. */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), signed type) || \
	__builtin_types_compatible_p(typeof(x), unsigned type), \
	({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
278{
279 return (u32)(((u64) val * ep_ro) >> 32);
280}
281
/*
 * might_fault - annotate a point that may fault on user memory access.
 * Only instrumented when MMU plus a locking/sleep debug option is on.
 */
#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif

/* Panic/oops machinery; panic() and the exit helpers never return. */
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;

#ifdef CONFIG_ARCH_HAS_REFCOUNT
void refcount_error_report(struct pt_regs *regs, const char *err);
#else
/* No-op stub when the arch provides no refcount error reporting. */
static inline void refcount_error_report(struct pt_regs *regs, const char *err)
{ }
#endif
308
309
310int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
311int __must_check _kstrtol(const char *s, unsigned int base, long *res);
312
313int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
314int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
/*
 * kstrtoul - convert a string to an unsigned long.
 * @s: string to parse
 * @base: numeric base (per the kstrto* convention; see kstrtoull())
 * @res: where to store the result on success
 *
 * Thin wrapper that forwards to kstrtoull() when unsigned long and
 * unsigned long long have identical size and alignment, so the pointer
 * cast is safe; otherwise it falls back to the dedicated _kstrtoul().
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * The sizeof/__alignof__ test is needed because
	 * __builtin_types_compatible_p(unsigned long, unsigned long long)
	 * is false even when the types share a representation.
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
/*
 * kstrtol - convert a string to a long.
 * @s: string to parse
 * @base: numeric base (per the kstrto* convention; see kstrtoll())
 * @res: where to store the result on success
 *
 * Mirrors kstrtoul(): forwards to kstrtoll() when long and long long
 * are representation-identical (making the pointer cast safe),
 * otherwise uses the dedicated _kstrtol().
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/* Same size/alignment check as kstrtoul(); see the comment there. */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
373
374int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
375int __must_check kstrtoint(const char *s, unsigned int base, int *res);
376
/*
 * Fixed-width aliases: u64/s64/u32/s32 are guaranteed to match the
 * underlying long long/int types, so these forward directly.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
396
397int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
398int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
399int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
400int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
401int __must_check kstrtobool(const char *s, bool *res);
402
403int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
404int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
405int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
406int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
407int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
408int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
409int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
410int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
411int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
412int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
413int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
414
/*
 * Fixed-width _from_user aliases, parallel to the kstrtou64/s64/u32/s32
 * wrappers above: the typedefs match the underlying types exactly.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
434
435
436
437extern unsigned long simple_strtoul(const char *,char **,unsigned int);
438extern long simple_strtol(const char *,char **,unsigned int);
439extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
440extern long long simple_strtoll(const char *,char **,unsigned int);
441
442extern int num_to_str(char *buf, int size,
443 unsigned long long num, unsigned int width);
444
445
446
447extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
448extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
449extern __printf(3, 4)
450int snprintf(char *buf, size_t size, const char *fmt, ...);
451extern __printf(3, 0)
452int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
453extern __printf(3, 4)
454int scnprintf(char *buf, size_t size, const char *fmt, ...);
455extern __printf(3, 0)
456int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
457extern __printf(2, 3) __malloc
458char *kasprintf(gfp_t gfp, const char *fmt, ...);
459extern __printf(2, 0) __malloc
460char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
461extern __printf(2, 0)
462const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
463
464extern __scanf(2, 3)
465int sscanf(const char *, const char *, ...);
466extern __scanf(2, 0)
467int vsscanf(const char *, const char *, va_list);
468
469extern int get_option(char **str, int *pint);
470extern char *get_options(const char *str, int nints, int *ints);
471extern unsigned long long memparse(const char *ptr, char **retptr);
472extern bool parse_option_str(const char *str, const char *option);
473extern char *next_arg(char *args, char **param, char **val);
474
475extern int core_kernel_text(unsigned long addr);
476extern int init_kernel_text(unsigned long addr);
477extern int core_kernel_data(unsigned long addr);
478extern int __kernel_text_address(unsigned long addr);
479extern int kernel_text_address(unsigned long addr);
480extern int func_ptr_is_kernel_text(void *ptr);
481
482unsigned long int_sqrt(unsigned long);
483
/*
 * int_sqrt64 - integer square root of a u64.  On 64-bit kernels
 * unsigned long already holds 64 bits, so plain int_sqrt() suffices;
 * 32-bit kernels need the dedicated out-of-line implementation.
 */
#if BITS_PER_LONG < 64
u32 int_sqrt64(u64 x);
#else
static inline u32 int_sqrt64(u64 x)
{
	return (u32)int_sqrt(x);
}
#endif
492
493extern void bust_spinlocks(int yes);
494extern int oops_in_progress;
495extern int panic_timeout;
496extern int panic_on_oops;
497extern int panic_on_unrecovered_nmi;
498extern int panic_on_io_nmi;
499extern int panic_on_warn;
500extern int sysctl_panic_on_rcu_stall;
501extern int sysctl_panic_on_stackoverflow;
502
503extern bool crash_kexec_post_notifiers;
504
505
506
507
508
509
510extern atomic_t panic_cpu;
511#define PANIC_CPU_INVALID -1
512
513
514
515
516
517static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
518{
519 if (panic_timeout == arch_default_timeout)
520 panic_timeout = timeout;
521}
522extern const char *print_tainted(void);
523enum lockdep_ok {
524 LOCKDEP_STILL_OK,
525 LOCKDEP_NOW_UNRELIABLE
526};
527extern void add_taint(unsigned flag, enum lockdep_ok);
528extern int test_taint(unsigned flag);
529extern unsigned long get_taint(void);
530extern int root_mountflags;
531
532extern bool early_boot_irqs_disabled;
533
534
535
536
537
/* Global boot/shutdown state of the kernel. */
extern enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_SCHEDULING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
} system_state;

/* Taint flag bit numbers; TAINT_FLAGS_COUNT sizes taint_flags[] below. */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
#define TAINT_FLAGS_COUNT 18

/* Display characters for a taint flag and whether modules can set it. */
struct taint_flag {
	char c_true;	/* character printed when the flag is set */
	char c_false;	/* character printed when the flag is clear */
	bool module;	/* presumably: taint also applies per-module — confirm at users */
};

extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
575
576extern const char hex_asc[];
577#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
578#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
579
580static inline char *hex_byte_pack(char *buf, u8 byte)
581{
582 *buf++ = hex_asc_hi(byte);
583 *buf++ = hex_asc_lo(byte);
584 return buf;
585}
586
587extern const char hex_asc_upper[];
588#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
589#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
590
591static inline char *hex_byte_pack_upper(char *buf, u8 byte)
592{
593 *buf++ = hex_asc_upper_hi(byte);
594 *buf++ = hex_asc_upper_lo(byte);
595 return buf;
596}
597
598extern int hex_to_bin(char ch);
599extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
600extern char *bin2hex(char *dst, const void *src, size_t count);
601
602bool mac_pton(const char *s, u8 *mac);
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624enum ftrace_dump_mode {
625 DUMP_NONE,
626 DUMP_ALL,
627 DUMP_ORIG,
628};
629
630#ifdef CONFIG_TRACING
631void tracing_on(void);
632void tracing_off(void);
633int tracing_is_on(void);
634void tracing_snapshot(void);
635void tracing_snapshot_alloc(void);
636
637extern void tracing_start(void);
638extern void tracing_stop(void);
639
/*
 * Empty __printf-annotated function: the `if (0)` call below never runs,
 * but lets the compiler check trace_printk() format strings against
 * their arguments at build time.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
	if (0) \
		____trace_printk_check_format(fmt, ##args); \
} while (0)
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
/*
 * trace_printk - printf into the ftrace ring buffer.
 *
 * Stringifying __VA_ARGS__ gives "()" (3 bytes incl. NUL) when no
 * arguments were passed, so the sizeof test routes no-arg calls to the
 * cheaper trace_puts() path.
 */
#define trace_printk(fmt, ...) \
do { \
	char _______STR[] = __stringify((__VA_ARGS__)); \
	if (sizeof(_______STR) > 3) \
		do_trace_printk(fmt, ##__VA_ARGS__); \
	else \
		trace_puts(fmt); \
} while (0)

/*
 * Constant format strings get stashed in the __trace_printk_fmt section
 * and use the binary-format __trace_bprintk(); runtime-built formats
 * fall back to __trace_printk().
 */
#define do_trace_printk(fmt, args...) \
do { \
	static const char *trace_printk_fmt __used \
		__attribute__((section("__trace_printk_fmt"))) = \
		__builtin_constant_p(fmt) ? fmt : NULL; \
	\
	__trace_printk_check_format(fmt, ##args); \
	\
	if (__builtin_constant_p(fmt)) \
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
	else \
		__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
702
703extern __printf(2, 3)
704int __trace_bprintk(unsigned long ip, const char *fmt, ...);
705
706extern __printf(2, 3)
707int __trace_printk(unsigned long ip, const char *fmt, ...);
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
/*
 * trace_puts - write a plain string to the ftrace buffer.  Constant
 * strings go through the section-registered pointer and __trace_bputs();
 * dynamic strings are copied with __trace_puts().  Evaluates to the
 * return value of the underlying call.
 */
#define trace_puts(str) ({ \
	static const char *trace_printk_fmt __used \
		__attribute__((section("__trace_printk_fmt"))) = \
		__builtin_constant_p(str) ? str : NULL; \
	\
	if (__builtin_constant_p(str)) \
		__trace_bputs(_THIS_IP_, trace_printk_fmt); \
	else \
		__trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
746
747extern void trace_dump_stack(int skip);
748
749
750
751
752
753
/*
 * ftrace_vprintk - va_list variant of trace_printk(); same constant/
 * dynamic format split as do_trace_printk().
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
	if (__builtin_constant_p(fmt)) { \
		static const char *trace_printk_fmt __used \
			__attribute__((section("__trace_printk_fmt"))) = \
			__builtin_constant_p(fmt) ? fmt : NULL; \
		\
		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
	} else \
		__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
765
766extern __printf(2, 0) int
767__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
768
769extern __printf(2, 0) int
770__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
771
772extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
773#else
/* !CONFIG_TRACING: all tracing entry points collapse to no-ops. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

/* Still __printf-annotated so format strings get checked even when off. */
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
795#endif
796
797
798
799
800
801
802
803
804
805
806
807
808
/*
 * Is it safe to compare x and y directly (single evaluation each)?
 * __typecheck() compares pointer types so mismatched operand types
 * provoke a -Wcompare-distinct-pointer-types warning.
 */
#define __typecheck(x, y) \
		(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))

/*
 * Detects integer constant expressions: when @x is one, the multiplied-
 * by-zero void pointer is a null pointer constant, so the conditional
 * expression has type int * (sizeof(int)); otherwise it stays void *.
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

#define __no_side_effects(x, y) \
		(__is_constexpr(x) && __is_constexpr(y))

#define __safe_cmp(x, y) \
		(__typecheck(x, y) && __no_side_effects(x, y))

/* Direct compare — may evaluate operands twice; only used when safe. */
#define __cmp(x, y, op)	((x) op (y) ? (x) : (y))

/* Single-evaluation compare using uniquely named temporaries. */
#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
		typeof(x) unique_x = (x); \
		typeof(y) unique_y = (y); \
		__cmp(unique_x, unique_y, op); })

/*
 * Use the constant-expression-preserving direct form when both operands
 * are side-effect-free constants (so min()/max() can size arrays),
 * otherwise the once-evaluated statement-expression form.
 */
#define __careful_cmp(x, y, op) \
	__builtin_choose_expr(__safe_cmp(x, y), \
		__cmp(x, y, op), \
		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
837
838
839
840
841
842
/*
 * min - return the minimum of two values of compatible types; operands
 * are evaluated at most once (see __careful_cmp()).
 */
#define min(x, y) __careful_cmp(x, y, <)

/* max - return the maximum of two values of compatible types. */
#define max(x, y) __careful_cmp(x, y, >)

/* min3/max3 - three-way variants; the inner result is cast to typeof(x). */
#define min3(x, y, z) min((typeof(x))min(x, y), z)

#define max3(x, y, z) max((typeof(x))max(x, y), z)

/*
 * min_not_zero - minimum of @x and @y, ignoring whichever is zero
 * (returns the other operand if one is zero).
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/*
 * clamp - restrict @val to the range [@lo, @hi].  Result is undefined
 * if @lo > @hi.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * _t variants: cast all operands to @type first, for comparing values
 * of deliberately different types.
 */
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)

#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)

/* clamp_t - clamp with every operand cast to @type. */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/* clamp_val - clamp where @lo/@hi take on typeof(@val). */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/* swap - exchange the values of @a and @b (same type). */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* COUNT_ARGS - number of arguments passed (0..12), via shifted placeholders. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* CONCATENATE - paste two tokens after expanding macro arguments. */
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
952
953
954
955
956
957
958
959
/*
 * container_of - cast a member of a structure out to the containing
 * structure.
 * @ptr:    pointer to the member
 * @type:   type of the container struct the member is embedded in
 * @member: name of the member within the struct
 *
 * The BUILD_BUG_ON_MSG rejects @ptr whose pointee type matches neither
 * the member nor void, catching wrong-member mistakes at compile time.
 */
#define container_of(ptr, type, member) ({ \
	void *__mptr = (void *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type *)(__mptr - offsetof(type, member))); })

/* Rebuild everything if the mcount record format changes. */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

/*
 * VERIFY_OCTAL_PERMISSIONS - compile-time sanity checks on a mode value:
 * in range, user >= group >= other for read, user >= group for write,
 * and never other-writable.  Evaluates to @perms when all checks pass.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
	(BUILD_BUG_ON_ZERO((perms) < 0) + \
	 BUILD_BUG_ON_ZERO((perms) > 0777) + \
	 \
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
	 \
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
	 \
	 BUILD_BUG_ON_ZERO((perms) & 2) + \
	 (perms))
984#endif
985