1
2#ifndef _LINUX_KERNEL_H
3#define _LINUX_KERNEL_H
4
5
6#include <stdarg.h>
7#include <linux/linkage.h>
8#include <linux/stddef.h>
9#include <linux/types.h>
10#include <linux/compiler.h>
11#include <linux/bitops.h>
12#include <linux/log2.h>
13#include <linux/typecheck.h>
14#include <linux/printk.h>
15#include <linux/build_bug.h>
16#include <asm/byteorder.h>
17#include <uapi/linux/kernel.h>
18
/* Limits of the native C types as used by the kernel */
#define USHRT_MAX	((u16)(~0U))
#define SHRT_MAX	((s16)(USHRT_MAX>>1))
#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
#define INT_MAX		((int)(~0U>>1))
#define INT_MIN		(-INT_MAX - 1)
#define UINT_MAX	(~0U)
#define LONG_MAX	((long)(~0UL>>1))
#define LONG_MIN	(-LONG_MAX - 1)
#define ULONG_MAX	(~0UL)
#define LLONG_MAX	((long long)(~0ULL>>1))
#define LLONG_MIN	(-LLONG_MAX - 1)
#define ULLONG_MAX	(~0ULL)
#define SIZE_MAX	(~(size_t)0)
#define PHYS_ADDR_MAX	(~(phys_addr_t)0)

/* Limits of the kernel's fixed-width integer types (u8..s64) */
#define U8_MAX		((u8)~0U)
#define S8_MAX		((s8)(U8_MAX>>1))
#define S8_MIN		((s8)(-S8_MAX - 1))
#define U16_MAX		((u16)~0U)
#define S16_MAX		((s16)(U16_MAX>>1))
#define S16_MIN		((s16)(-S16_MAX - 1))
#define U32_MAX		((u32)~0U)
#define S32_MAX		((s32)(U32_MAX>>1))
#define S32_MIN		((s32)(-S32_MAX - 1))
#define U64_MAX		((u64)~0ULL)
#define S64_MAX		((s64)(U64_MAX>>1))
#define S64_MIN		((s64)(-S64_MAX - 1))
46
47#define STACK_MAGIC 0xdeadbeef
48
49
50
51
52
53
54
/*
 * REPEAT_BYTE(x) - replicate the low byte of @x into every byte of an
 * unsigned long.  ~0ul/0xff is 0x0101...01, so multiplying by the byte
 * value broadcasts it (e.g. REPEAT_BYTE(0x12) == 0x1212...12).
 */
#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))

/* @a is a power of 2 value */
#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
63
64
65#define READ 0
66#define WRITE 1
67
68
69
70
71
/**
 * ARRAY_SIZE - get the number of elements in array @arr
 * @arr: array to be sized (must be an actual array, not a pointer —
 *       __must_be_array() breaks the build otherwise)
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/*
 * Convert a u64 into a user-space pointer; typecheck() enforces at
 * compile time that @x really is a u64.
 */
#define u64_to_user_ptr(x) (		\
{					\
	typecheck(u64, x);		\
	(void __user *)(uintptr_t)x;	\
}					\
)
80
81
82
83
84
85
86
/* @y must be a power of 2: mask of the bits below the rounding boundary */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))

/**
 * round_up - round up to next specified power of 2
 * @x: the value to round
 * @y: multiple to round up to (must be a power of 2)
 *
 * Rounds @x up to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding up, use roundup() below.
 */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)

/**
 * round_down - round down to next specified power of 2
 * @x: the value to round
 * @y: multiple to round down to (must be a power of 2)
 *
 * Rounds @x down to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding down, use rounddown() below.
 */
#define round_down(x, y) ((x) & ~__round_mask(x, y))
106
107
108
109
110
111
112
113
/**
 * FIELD_SIZEOF - get the size of a struct's field
 * @t: the target struct
 * @f: the target struct's field
 * Return: the size of @f in the struct definition without having a
 * declared instance of @t.
 */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

/* Division helpers for unsigned long long dividends, built on do_div() */
#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))

#if BITS_PER_LONG == 32
/* On 32-bit, sector_t may be 64-bit wide, so take the ULL path */
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
128
129
130
131
132
133
134
135
136
137
138
/**
 * roundup - round up to the next specified multiple
 * @x: the value to round
 * @y: multiple to round up to
 *
 * Rounds @x up to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_up().
 */
#define roundup(x, y) (					\
{							\
	const typeof(y) __y = y;			\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)

/**
 * rounddown - round down to next specified multiple
 * @x: the value to round
 * @y: multiple to round down to
 *
 * Rounds @x down to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_down().
 */
#define rounddown(x, y) (				\
{							\
	typeof(x) __x = (x);				\
	__x - (__x % (y));				\
}							\
)
159
160
161
162
163
164
165
/*
 * Divide positive or negative dividend by positive or negative divisor
 * and round to closest integer. The sign test is skipped (first two
 * disjuncts) when either operand's type is unsigned, since (T)-1 > 0
 * then; result is undefined for negative divisors if the dividend type
 * is unsigned, and vice versa.
 */
#define DIV_ROUND_CLOSEST(x, divisor)(			\
{							\
	typeof(x) __x = x;				\
	typeof(divisor) __d = divisor;			\
	(((typeof(x))-1) > 0 ||				\
	 ((typeof(divisor))-1) > 0 ||			\
	 (((__x) > 0) == ((__d) > 0))) ?		\
		(((__x) + ((__d) / 2)) / (__d)) :	\
		(((__x) - ((__d) / 2)) / (__d));	\
}							\
)

/*
 * Same as above but for unsigned long long dividends; the divisor goes
 * through do_div(), so it is limited to 32 bits.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)(		\
{							\
	typeof(divisor) __d = divisor;			\
	unsigned long long _tmp = (x) + (__d) / 2;	\
	do_div(_tmp, __d);				\
	_tmp;						\
}							\
)

/*
 * Multiplies an integer by a fraction (numer/denom), splitting @x into
 * quotient and remainder first to avoid unnecessary overflow or loss of
 * precision in the intermediate products.
 */
#define mult_frac(x, numer, denom)(			\
{							\
	typeof(x) quot = (x) / (denom);			\
	typeof(x) rem = (x) % (denom);			\
	(quot * (numer)) + ((rem * (numer)) / (denom));	\
}							\
)
201
202
/* Caller's return address / current instruction address, for diagnostics */
#define _RET_IP_		(unsigned long)__builtin_return_address(0)
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
/* Without CONFIG_LBDAF, sector_t fits in a native division */
# define sector_div(n, b)(	\
{				\
	int _res;		\
	_res = (n) % (b);	\
	(n) /= (b);		\
	_res;			\
}				\
)
#endif

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits (hence the two 16-bit shifts instead of one shift by 32).
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
235
236struct completion;
237struct pt_regs;
238struct user;
239
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
/* Voluntary preemption: offer a reschedule point */
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  void ___might_sleep(const char *file, int line, int preempt_offset);
  void __might_sleep(const char *file, int line, int preempt_offset);

/**
 * might_sleep - annotation for functions that can sleep
 *
 * this macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...). Additional sections where blocking is
 * not allowed can be annotated with non_block_start() and non_block_end()
 * pairs.
 *
 * This is a useful debugging help to be able to catch problems early and not
 * be bitten later when the calling function happens to sleep when it is not
 * supposed to.
 */
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
  static inline void ___might_sleep(const char *file, int line,
				   int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
				  int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form of the above */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
273
274
275
276
277
278
279
280
281
/**
 * abs - return absolute value of an argument
 * @x: the value.  If it is unsigned type, it is converted to signed type first.
 *     char is treated as if it was signed (regardless of whether it really is)
 *     but the macro's return type is preserved as char.
 *
 * Return: an absolute value of x.
 */
#define abs(x)	__abs_choose_expr(x, long long,				\
		__abs_choose_expr(x, long,				\
		__abs_choose_expr(x, int,				\
		__abs_choose_expr(x, short,				\
		__abs_choose_expr(x, char,				\
		__builtin_choose_expr(					\
			__builtin_types_compatible_p(typeof(x), char),	\
			(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
			((void)0)))))))

/* Select the branch whose (signed or unsigned) 'type' matches typeof(x) */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr(	\
	__builtin_types_compatible_p(typeof(x),   signed type) ||	\
	__builtin_types_compatible_p(typeof(x), unsigned type),		\
	({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
312{
313 return (u32)(((u64) val * ep_ro) >> 32);
314}
315
#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
/* Annotate a site that may fault on user memory access */
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
323
324extern struct atomic_notifier_head panic_notifier_list;
325extern long (*panic_blink)(int state);
326__printf(1, 2)
327void panic(const char *fmt, ...) __noreturn __cold;
328void nmi_panic(struct pt_regs *regs, const char *msg);
329extern void oops_enter(void);
330extern void oops_exit(void);
331void print_oops_end_marker(void);
332extern int oops_may_print(void);
333void do_exit(long error_code) __noreturn;
334void complete_and_exit(struct completion *, long) __noreturn;
335
#ifdef CONFIG_ARCH_HAS_REFCOUNT
/* Arch-provided reporting hook for refcount errors; no-op otherwise */
void refcount_error_report(struct pt_regs *regs, const char *err);
#else
static inline void refcount_error_report(struct pt_regs *regs, const char *err)
{ }
#endif
342
343
344int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
345int __must_check _kstrtol(const char *s, unsigned int base, long *res);
346
347int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
348int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
/**
 * kstrtoul - convert a string to an unsigned long
 * @s: string to parse
 * @base: number base to use (0 means auto-detect; see kstrtoull())
 * @res: where to write the result on success
 *
 * Return: whatever the underlying parser returns (0 on success,
 * negative errno on failure — see kstrtoull()/_kstrtoul()).
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * Shortcut to kstrtoull() when unsigned long and unsigned long
	 * long have identical size and alignment, which makes the
	 * pointer cast safe; otherwise use the dedicated _kstrtoul().
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
/**
 * kstrtol - convert a string to a long
 * @s: string to parse
 * @base: number base to use (0 means auto-detect; see kstrtoll())
 * @res: where to write the result on success
 *
 * Return: whatever the underlying parser returns (0 on success,
 * negative errno on failure — see kstrtoll()/_kstrtol()).
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/*
	 * Shortcut to kstrtoll() when long and long long have identical
	 * size and alignment, which makes the pointer cast safe;
	 * otherwise use the dedicated _kstrtol().
	 */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
407
408int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
409int __must_check kstrtoint(const char *s, unsigned int base, int *res);
410
/*
 * Fixed-width parsing wrappers.  These delegate with no cast, which
 * only compiles because u64/s64 are typedefs of (unsigned) long long
 * and u32/s32 of (unsigned) int (from <linux/types.h>).
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
430
431int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
432int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
433int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
434int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
435int __must_check kstrtobool(const char *s, bool *res);
436
437int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
438int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
439int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
440int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
441int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
442int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
443int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
444int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
445int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
446int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
447int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
448
/*
 * Fixed-width variants of the *_from_user parsers (string read from a
 * user-space buffer of @count bytes); same typedef-identity delegation
 * as the kstrtou64()/kstrtos64()/... wrappers above.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
468
469
470
471extern unsigned long simple_strtoul(const char *,char **,unsigned int);
472extern long simple_strtol(const char *,char **,unsigned int);
473extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
474extern long long simple_strtoll(const char *,char **,unsigned int);
475
476extern int num_to_str(char *buf, int size,
477 unsigned long long num, unsigned int width);
478
479
480
481extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
482extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
483extern __printf(3, 4)
484int snprintf(char *buf, size_t size, const char *fmt, ...);
485extern __printf(3, 0)
486int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
487extern __printf(3, 4)
488int scnprintf(char *buf, size_t size, const char *fmt, ...);
489extern __printf(3, 0)
490int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
491extern __printf(2, 3) __malloc
492char *kasprintf(gfp_t gfp, const char *fmt, ...);
493extern __printf(2, 0) __malloc
494char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
495extern __printf(2, 0)
496const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
497
498extern __scanf(2, 3)
499int sscanf(const char *, const char *, ...);
500extern __scanf(2, 0)
501int vsscanf(const char *, const char *, va_list);
502
503extern int get_option(char **str, int *pint);
504extern char *get_options(const char *str, int nints, int *ints);
505extern unsigned long long memparse(const char *ptr, char **retptr);
506extern bool parse_option_str(const char *str, const char *option);
507extern char *next_arg(char *args, char **param, char **val);
508
509extern int core_kernel_text(unsigned long addr);
510extern int init_kernel_text(unsigned long addr);
511extern int core_kernel_data(unsigned long addr);
512extern int __kernel_text_address(unsigned long addr);
513extern int kernel_text_address(unsigned long addr);
514extern int func_ptr_is_kernel_text(void *ptr);
515
unsigned long int_sqrt(unsigned long);

#if BITS_PER_LONG < 64
u32 int_sqrt64(u64 x);
#else
/* On 64-bit, unsigned long holds any u64, so just reuse int_sqrt() */
static inline u32 int_sqrt64(u64 x)
{
	return (u32)int_sqrt(x);
}
#endif
526
527extern void bust_spinlocks(int yes);
528extern int oops_in_progress;
529extern int panic_timeout;
530extern int panic_on_oops;
531extern int panic_on_unrecovered_nmi;
532extern int panic_on_io_nmi;
533extern int panic_on_warn;
534extern int sysctl_panic_on_rcu_stall;
535extern int sysctl_panic_on_stackoverflow;
536
537extern bool crash_kexec_post_notifiers;
538
539
540
541
542
543
544extern atomic_t panic_cpu;
545#define PANIC_CPU_INVALID -1
546
547
548
549
550
/*
 * Only override the global panic_timeout if it still holds the
 * architecture's default value — i.e. the user has not changed it.
 */
static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
	if (panic_timeout == arch_default_timeout)
		panic_timeout = timeout;
}
556extern const char *print_tainted(void);
557enum lockdep_ok {
558 LOCKDEP_STILL_OK,
559 LOCKDEP_NOW_UNRELIABLE
560};
561extern void add_taint(unsigned flag, enum lockdep_ok);
562extern int test_taint(unsigned flag);
563extern unsigned long get_taint(void);
564extern int root_mountflags;
565
566extern bool early_boot_irqs_disabled;
567
568
569
570
571
/*
 * Values used for system_state. Ordering of the states must not be changed
 * as code checks for <, <=, >, >= STATE.
 */
extern enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_SCHEDULING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
	SYSTEM_SUSPEND,
} system_state;

/* Taint flag bit numbers (indices into taint_flags[]) */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
#define TAINT_FLAGS_COUNT 18

struct taint_flag {
	char c_true;	/* character printed when the flag is set */
	char c_false;	/* character printed when it is clear */
	bool module;	/* NOTE(review): presumably "also applies per-module" — confirm */
};

extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
610
611extern const char hex_asc[];
612#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
613#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
614
615static inline char *hex_byte_pack(char *buf, u8 byte)
616{
617 *buf++ = hex_asc_hi(byte);
618 *buf++ = hex_asc_lo(byte);
619 return buf;
620}
621
622extern const char hex_asc_upper[];
623#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
624#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
625
626static inline char *hex_byte_pack_upper(char *buf, u8 byte)
627{
628 *buf++ = hex_asc_upper_hi(byte);
629 *buf++ = hex_asc_upper_lo(byte);
630 return buf;
631}
632
633extern int hex_to_bin(char ch);
634extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
635extern char *bin2hex(char *dst, const void *src, size_t count);
636
637bool mac_pton(const char *s, u8 *mac);
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
/* How much of the trace buffers ftrace_dump() should dump */
enum ftrace_dump_mode {
	DUMP_NONE,
	DUMP_ALL,
	DUMP_ORIG,	/* NOTE(review): presumably the originating CPU only — confirm */
};
664
665#ifdef CONFIG_TRACING
666void tracing_on(void);
667void tracing_off(void);
668int tracing_is_on(void);
669void tracing_snapshot(void);
670void tracing_snapshot_alloc(void);
671
672extern void tracing_start(void);
673extern void tracing_stop(void);
674
/*
 * Empty printf-attributed function: calling it inside "if (0)" lets the
 * compiler type-check trace_printk() format strings against their
 * arguments at zero runtime cost (the call is never executed).
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...)	\
do {							\
	if (0)						\
		____trace_printk_check_format(fmt, ##args); \
} while (0)
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
/*
 * trace_printk - printf-style output into the trace buffer.
 *
 * __stringify((__VA_ARGS__)) is "()" (sizeof == 3, including the NUL)
 * exactly when no format arguments were given, in which case the
 * cheaper trace_puts() path is taken; otherwise do_trace_printk().
 */
#define trace_printk(fmt, ...)				\
do {							\
	char _______STR[] = __stringify((__VA_ARGS__));	\
	if (sizeof(_______STR) > 3)			\
		do_trace_printk(fmt, ##__VA_ARGS__);	\
	else						\
		trace_puts(fmt);			\
} while (0)

/*
 * When the format is a compile-time constant, its pointer is recorded
 * in the dedicated "__trace_printk_fmt" section and the binary
 * __trace_bprintk() path is used; otherwise fall back to
 * __trace_printk().  __trace_printk_check_format() adds compile-time
 * format checking at no runtime cost.
 */
#define do_trace_printk(fmt, args...)					\
do {									\
	static const char *trace_printk_fmt __used			\
		__attribute__((section("__trace_printk_fmt"))) =	\
		__builtin_constant_p(fmt) ? fmt : NULL;			\
									\
	__trace_printk_check_format(fmt, ##args);			\
									\
	if (__builtin_constant_p(fmt))					\
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
	else								\
		__trace_printk(_THIS_IP_, fmt, ##args);			\
} while (0)
737
738extern __printf(2, 3)
739int __trace_bprintk(unsigned long ip, const char *fmt, ...);
740
741extern __printf(2, 3)
742int __trace_printk(unsigned long ip, const char *fmt, ...);
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
/**
 * trace_puts - write a string into the ftrace buffer
 * @str: the string to record
 *
 * A compile-time-constant @str takes the binary __trace_bputs() path
 * (only the pointer, recorded in the "__trace_printk_fmt" section, is
 * stored); a runtime string is copied via __trace_puts() along with its
 * strlen().
 */
#define trace_puts(str) ({					\
	static const char *trace_printk_fmt __used		\
		__attribute__((section("__trace_printk_fmt"))) = \
		__builtin_constant_p(str) ? str : NULL;		\
								\
	if (__builtin_constant_p(str))				\
		__trace_bputs(_THIS_IP_, trace_printk_fmt);	\
	else							\
		__trace_puts(_THIS_IP_, str, strlen(str));	\
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
781
782extern void trace_dump_stack(int skip);
783
784
785
786
787
788
/*
 * va_list flavour of trace_printk(), with the same constant-format
 * optimisation as do_trace_printk() (binary __ftrace_vbprintk() for
 * constant formats, __ftrace_vprintk() otherwise).
 */
#define ftrace_vprintk(fmt, vargs)					\
do {									\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt __used		\
			__attribute__((section("__trace_printk_fmt"))) = \
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs);	\
	} else								\
		__ftrace_vprintk(_THIS_IP_, fmt, vargs);		\
} while (0)
800
801extern __printf(2, 0) int
802__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
803
804extern __printf(2, 0) int
805__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
806
807extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
808#else
/* CONFIG_TRACING=n: all tracing entry points compile away to no-ops */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
830#endif
831
832
833
834
835
836
837
838
839
840
841
842
843
/*
 * min()/max()/clamp() machinery.  Three goals:
 *  - evaluate side-effecting arguments only once (__cmp_once);
 *  - strict type checking: comparing pointers to incompatible types
 *    draws a compiler warning, and sizeof() keeps the comparison
 *    unevaluated (__typecheck);
 *  - stay a constant expression when both arguments are, so the result
 *    can size static arrays (__builtin_choose_expr in __careful_cmp).
 */
#define __typecheck(x, y) \
	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))

/*
 * This returns a constant expression while determining if an argument
 * is a constant expression, most importantly without evaluating the
 * argument: for a constant-0 @x the cast yields a null pointer constant
 * and the ?: picks type int*, otherwise void* — compared via sizeof.
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

#define __no_side_effects(x, y) \
		(__is_constexpr(x) && __is_constexpr(y))

#define __safe_cmp(x, y) \
		(__typecheck(x, y) && __no_side_effects(x, y))

#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))

/* Single-evaluation form: capture both arguments in unique temporaries */
#define __cmp_once(x, y, unique_x, unique_y, op) ({	\
	typeof(x) unique_x = (x);			\
	typeof(y) unique_y = (y);			\
	__cmp(unique_x, unique_y, op); })

#define __careful_cmp(x, y, op) \
	__builtin_choose_expr(__safe_cmp(x, y), \
		__cmp(x, y, op), \
		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
872
873
874
875
876
877
/**
 * min - return minimum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define min(x, y)	__careful_cmp(x, y, <)

/**
 * max - return maximum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define max(x, y)	__careful_cmp(x, y, >)

/**
 * min3 - return minimum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define min3(x, y, z) min((typeof(x))min(x, y), z)

/**
 * max3 - return maximum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({			\
	typeof(x) __x = (x);			\
	typeof(y) __y = (y);			\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of @lo/@hi to make sure they are of
 * the same type as @val (via the min()/max() it is built on).
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * ..and if you can't take the strict types, you can specify one
 * yourself with the _t variants below.
 */

/**
 * min_t - return minimum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define min_t(type, x, y)	__careful_cmp((type)(x), (type)(y), <)

/**
 * max_t - return maximum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define max_t(type, x, y)	__careful_cmp((type)(x), (type)(y), >)

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * @type to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of
 * whatever type the input argument @val is.  This is useful when @val
 * is an unsigned type and @lo/@hi are literals that would otherwise be
 * signed ints.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/*
 * swap - swap values of @a and @b (evaluates each lvalue twice; @a and
 * @b must be of compatible types)
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
980
981
/* COUNT_ARGS(...) expands to the number of arguments given, 0..12 */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* Token pasting; CONCATENATE macro-expands its arguments first */
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)
987
988
989
990
991
992
993
994
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * The BUILD_BUG_ON_MSG() rejects @ptr whose pointee type matches
 * neither the member's type nor void.
 */
#define container_of(ptr, type, member) ({				\
	void *__mptr = (void *)(ptr);					\
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\
			 !__same_type(*(ptr), void),			\
			 "pointer type mismatch in container_of()");	\
	((type *)(__mptr - offsetof(type, member))); })

/**
 * container_of_safe - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged (via ERR_CAST).
 */
#define container_of_safe(ptr, type, member) ({				\
	void *__mptr = (void *)(ptr);					\
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\
			 !__same_type(*(ptr), void),			\
			 "pointer type mismatch in container_of()");	\
	IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) :			\
		((type *)(__mptr - offsetof(type, member))); })
1017
1018
1019#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1020# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
1021#endif
1022
1023
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
#define VERIFY_OCTAL_PERMISSIONS(perms)					\
	(BUILD_BUG_ON_ZERO((perms) < 0) +				\
	 BUILD_BUG_ON_ZERO((perms) > 0777) +				\
	 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */	\
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) +	\
	 /* USER_WRITABLE >= GROUP_WRITABLE */				\
	 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
	 /* OTHER_WRITABLE: rejected outright */			\
	 BUILD_BUG_ON_ZERO((perms) & 2) +				\
	 (perms))
1035#endif
1036