1
2#ifndef _LINUX_KERNEL_H
3#define _LINUX_KERNEL_H
4
5
6#include <stdarg.h>
7#include <linux/limits.h>
8#include <linux/linkage.h>
9#include <linux/stddef.h>
10#include <linux/types.h>
11#include <linux/compiler.h>
12#include <linux/bitops.h>
13#include <linux/log2.h>
14#include <linux/typecheck.h>
15#include <linux/printk.h>
16#include <linux/build_bug.h>
17#include <asm/byteorder.h>
18#include <asm/div64.h>
19#include <uapi/linux/kernel.h>
20#include <asm/div64.h>
21
22#define STACK_MAGIC 0xdeadbeef
23
24
25
26
27
28
29
30#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
31
32
33#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
34#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
35#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
36#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
37#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
38
39
40#define READ 0
41#define WRITE 1
42
43
44
45
46
47#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
48
49#define u64_to_user_ptr(x) ( \
50{ \
51 typecheck(u64, (x)); \
52 (void __user *)(uintptr_t)(x); \
53} \
54)
55
56
57
58
59
60
61
/*
 * __round_mask - build the (y - 1) bitmask used by round_up()/round_down();
 * only meaningful when @y is a power of two. The cast keeps the mask in
 * @x's type so the bitwise ops below do not change width.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))

/**
 * round_up - round up to next specified power of 2
 * @x: the value to round
 * @y: multiple to round up to (must be a power of 2)
 *
 * Rounds @x up to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding up, use roundup() below.
 */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)

/**
 * round_down - round down to next specified power of 2
 * @x: the value to round
 * @y: multiple to round down to (must be a power of 2)
 *
 * Rounds @x down to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding down, use rounddown() below.
 */
#define round_down(x, y) ((x) & ~__round_mask(x, y))
81
82
83
84
85
86
87
88
89#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
90
91#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
92
93#define DIV_ROUND_DOWN_ULL(ll, d) \
94 ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
95
96#define DIV_ROUND_UP_ULL(ll, d) \
97 DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
98
99#if BITS_PER_LONG == 32
100# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
101#else
102# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
103#endif
104
/**
 * roundup - round up to the next specified multiple
 * @x: the value to round
 * @y: multiple to round up to
 *
 * Unlike round_up(), @y does not have to be a power of two: this form
 * divides and multiplies. Both arguments are evaluated once (@y is
 * captured in the __y temporary inside the statement expression).
 */
#define roundup(x, y) (					\
{							\
	typeof(y) __y = y;				\
	(((x) + (__y - 1)) / __y) * __y;		\
} \
)

/**
 * rounddown - round down to the previous specified multiple
 * @x: the value to round
 * @y: multiple to round down to
 *
 * Works for arbitrary @y (not just powers of two). @x is evaluated once
 * via the __x temporary.
 */
#define rounddown(x, y) (				\
{							\
	typeof(x) __x = (x);				\
	__x - (__x % (y));				\
} \
)
133
/*
 * DIV_ROUND_CLOSEST - divide two values and round to the nearest integer.
 *
 * The (typeof(..))-1 > 0 tests detect unsigned operand types at compile
 * time; for unsigned operands, or when dividend and divisor have the same
 * sign, the +(__d/2) bias rounds toward the nearest value. Only when both
 * types are signed and the signs differ is the -(__d/2) bias used, so the
 * rounding is still toward the nearest integer rather than toward zero.
 * Both arguments are evaluated exactly once via the temporaries.
 */
#define DIV_ROUND_CLOSEST(x, divisor)(			\
{							\
	typeof(x) __x = x;				\
	typeof(divisor) __d = divisor;			\
	(((typeof(x))-1) > 0 ||				\
	 ((typeof(divisor))-1) > 0 ||			\
	 (((__x) > 0) == ((__d) > 0))) ?		\
		(((__x) + ((__d) / 2)) / (__d)) :	\
		(((__x) - ((__d) / 2)) / (__d));	\
}							\
)
151
152
153
154
155#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
156{ \
157 typeof(divisor) __d = divisor; \
158 unsigned long long _tmp = (x) + (__d) / 2; \
159 do_div(_tmp, __d); \
160 _tmp; \
161} \
162)
163
164
165
166
167
168#define mult_frac(x, numer, denom)( \
169{ \
170 typeof(x) quot = (x) / (denom); \
171 typeof(x) rem = (x) % (denom); \
172 (quot * (numer)) + ((rem * (numer)) / (denom)); \
173} \
174)
175
176
177#define _RET_IP_ (unsigned long)__builtin_return_address(0)
178#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
179
180#define sector_div(a, b) do_div(a, b)
181
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32 bits — hence the double 16-bit shift instead of a single >> 32.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
197
198struct completion;
199struct pt_regs;
200struct user;
201
202#ifdef CONFIG_PREEMPT_VOLUNTARY
203extern int _cond_resched(void);
204# define might_resched() _cond_resched()
205#else
206# define might_resched() do { } while (0)
207#endif
208
209#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
210extern void ___might_sleep(const char *file, int line, int preempt_offset);
211extern void __might_sleep(const char *file, int line, int preempt_offset);
212extern void __cant_sleep(const char *file, int line, int preempt_offset);
213
214
215
216
217
218
219
220
221
222
223
224# define might_sleep() \
225 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
226
227
228
229
230
231# define cant_sleep() \
232 do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
233# define sched_annotate_sleep() (current->task_state_change = 0)
234#else
 /* !CONFIG_DEBUG_ATOMIC_SLEEP: the might-sleep debug hooks become no-ops. */
 static inline void ___might_sleep(const char *file, int line,
		   int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
		   int preempt_offset) { }
239# define might_sleep() do { might_resched(); } while (0)
240# define cant_sleep() do { } while (0)
241# define sched_annotate_sleep() do { } while (0)
242#endif
243
244#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
245
246
247
248
249
250
251
252
253
254#define abs(x) __abs_choose_expr(x, long long, \
255 __abs_choose_expr(x, long, \
256 __abs_choose_expr(x, int, \
257 __abs_choose_expr(x, short, \
258 __abs_choose_expr(x, char, \
259 __builtin_choose_expr( \
260 __builtin_types_compatible_p(typeof(x), char), \
261 (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
262 ((void)0)))))))
263
264#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
265 __builtin_types_compatible_p(typeof(x), signed type) || \
266 __builtin_types_compatible_p(typeof(x), unsigned type), \
267 ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
/*
 * reciprocal_scale - map a 32-bit value into the interval [0, ep_ro)
 * @val:   value to scale
 * @ep_ro: exclusive upper bound of the result ("end point, right open")
 *
 * Multiplies in 64 bits and keeps only the high 32 bits, which scales
 * @val proportionally into [0, ep_ro) without a division.
 */
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	u64 product = (u64)val * ep_ro;

	return (u32)(product >> 32);
}
287
288#if defined(CONFIG_MMU) && \
289 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
290#define might_fault() __might_fault(__FILE__, __LINE__)
291void __might_fault(const char *file, int line);
292#else
/* Without MMU + lock/sleep debugging, might_fault() checks compile away. */
static inline void might_fault(void) { }
294#endif
295
296extern struct atomic_notifier_head panic_notifier_list;
297extern long (*panic_blink)(int state);
298__printf(1, 2)
299void panic(const char *fmt, ...) __noreturn __cold;
300void nmi_panic(struct pt_regs *regs, const char *msg);
301extern void oops_enter(void);
302extern void oops_exit(void);
303void print_oops_end_marker(void);
304extern int oops_may_print(void);
305void do_exit(long error_code) __noreturn;
306void complete_and_exit(struct completion *, long) __noreturn;
307
308#ifdef CONFIG_ARCH_HAS_REFCOUNT
309void refcount_error_report(struct pt_regs *regs, const char *err);
310#else
/* Stub for !CONFIG_ARCH_HAS_REFCOUNT: refcount error reporting compiled out. */
static inline void refcount_error_report(struct pt_regs *regs, const char *err)
{ }
313#endif
314
315
316int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
317int __must_check _kstrtol(const char *s, unsigned int base, long *res);
318
319int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
320int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
321
322
323
324
325
326
327
328
329
/**
 * kstrtoul - convert a string to an unsigned long
 * @s: start of the string; must be null-terminated
 * @base: numeric base for the conversion (exact semantics — e.g. whether
 *        0 means auto-detect — live in the out-of-line kstrtoull() /
 *        _kstrtoul(); TODO confirm against lib/kstrtox.c)
 * @res: where to write the result on success
 *
 * Returns 0 on success or a negative error code propagated from the
 * underlying conversion helper.
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * When unsigned long and unsigned long long have identical size
	 * and alignment, the pointer cast below is a safe type pun and
	 * the unsigned long long parser can be reused directly;
	 * otherwise fall back to the dedicated _kstrtoul().
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
350
351
352
353
354
355
356
357
358
359
/**
 * kstrtol - convert a string to a long
 * @s: start of the string; must be null-terminated
 * @base: numeric base for the conversion (exact semantics live in the
 *        out-of-line kstrtoll()/_kstrtol(); TODO confirm against
 *        lib/kstrtox.c)
 * @res: where to write the result on success
 *
 * Returns 0 on success or a negative error code propagated from the
 * underlying conversion helper.
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/*
	 * When long and long long have identical size and alignment the
	 * pointer cast below is a safe type pun, so the long long parser
	 * can be reused directly; otherwise fall back to _kstrtol().
	 */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
379
380int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
381int __must_check kstrtoint(const char *s, unsigned int base, int *res);
382
/* Fixed-width wrappers: each defers to the parser of identical width
 * (u64 <-> unsigned long long, u32 <-> unsigned int, and signed
 * counterparts), so the pointer types line up without a cast.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
402
403int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
404int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
405int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
406int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
407int __must_check kstrtobool(const char *s, bool *res);
408
409int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
410int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
411int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
412int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
413int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
414int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
415int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
416int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
417int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
418int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
419int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
420
/* Userspace-string fixed-width wrappers: thin forwarding to the
 * kstrto*_from_user() parser of identical width, mirroring the
 * kstrtou64()/kstrtos64()/kstrtou32()/kstrtos32() wrappers above.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
440
441
442
443extern unsigned long simple_strtoul(const char *,char **,unsigned int);
444extern long simple_strtol(const char *,char **,unsigned int);
445extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
446extern long long simple_strtoll(const char *,char **,unsigned int);
447
448extern int num_to_str(char *buf, int size,
449 unsigned long long num, unsigned int width);
450
451
452
453extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
454extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
455extern __printf(3, 4)
456int snprintf(char *buf, size_t size, const char *fmt, ...);
457extern __printf(3, 0)
458int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
459extern __printf(3, 4)
460int scnprintf(char *buf, size_t size, const char *fmt, ...);
461extern __printf(3, 0)
462int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
463extern __printf(2, 3) __malloc
464char *kasprintf(gfp_t gfp, const char *fmt, ...);
465extern __printf(2, 0) __malloc
466char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
467extern __printf(2, 0)
468const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
469
470extern __scanf(2, 3)
471int sscanf(const char *, const char *, ...);
472extern __scanf(2, 0)
473int vsscanf(const char *, const char *, va_list);
474
475extern int get_option(char **str, int *pint);
476extern char *get_options(const char *str, int nints, int *ints);
477extern unsigned long long memparse(const char *ptr, char **retptr);
478extern bool parse_option_str(const char *str, const char *option);
479extern char *next_arg(char *args, char **param, char **val);
480
481extern int core_kernel_text(unsigned long addr);
482extern int init_kernel_text(unsigned long addr);
483extern int core_kernel_data(unsigned long addr);
484extern int __kernel_text_address(unsigned long addr);
485extern int kernel_text_address(unsigned long addr);
486extern int func_ptr_is_kernel_text(void *ptr);
487
488u64 int_pow(u64 base, unsigned int exp);
489unsigned long int_sqrt(unsigned long);
490
491#if BITS_PER_LONG < 64
492u32 int_sqrt64(u64 x);
493#else
static inline u32 int_sqrt64(u64 x)
{
	/*
	 * BITS_PER_LONG >= 64 in this branch, so unsigned long can hold
	 * the whole u64 and the generic int_sqrt() can be reused; the
	 * square root of a 64-bit value always fits in 32 bits.
	 */
	return (u32)int_sqrt(x);
}
498#endif
499
500extern void bust_spinlocks(int yes);
501extern int oops_in_progress;
502extern int panic_timeout;
503extern unsigned long panic_print;
504extern int panic_on_oops;
505extern int panic_on_unrecovered_nmi;
506extern int panic_on_io_nmi;
507extern int panic_on_warn;
508extern int sysctl_panic_on_rcu_stall;
509extern int sysctl_panic_on_stackoverflow;
510
511extern bool crash_kexec_post_notifiers;
512
513
514
515
516
517
518extern atomic_t panic_cpu;
519#define PANIC_CPU_INVALID -1
520
/*
 * set_arch_panic_timeout - install an architecture-specific panic timeout
 * @timeout: timeout to install
 * @arch_default_timeout: the architecture's default value
 *
 * Only overwrites the global panic_timeout while it still equals the
 * architecture default — presumably so a value the user configured
 * explicitly (e.g. on the command line) is not clobbered; TODO confirm
 * against callers.
 */
static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
	if (panic_timeout == arch_default_timeout)
		panic_timeout = timeout;
}
530extern const char *print_tainted(void);
531enum lockdep_ok {
532 LOCKDEP_STILL_OK,
533 LOCKDEP_NOW_UNRELIABLE
534};
535extern void add_taint(unsigned flag, enum lockdep_ok);
536extern int test_taint(unsigned flag);
537extern unsigned long get_taint(void);
538extern int root_mountflags;
539
540extern bool early_boot_irqs_disabled;
541
542
543
544
545
546extern enum system_states {
547 SYSTEM_BOOTING,
548 SYSTEM_SCHEDULING,
549 SYSTEM_RUNNING,
550 SYSTEM_HALT,
551 SYSTEM_POWER_OFF,
552 SYSTEM_RESTART,
553 SYSTEM_SUSPEND,
554} system_state;
555
556
557#define TAINT_PROPRIETARY_MODULE 0
558#define TAINT_FORCED_MODULE 1
559#define TAINT_CPU_OUT_OF_SPEC 2
560#define TAINT_FORCED_RMMOD 3
561#define TAINT_MACHINE_CHECK 4
562#define TAINT_BAD_PAGE 5
563#define TAINT_USER 6
564#define TAINT_DIE 7
565#define TAINT_OVERRIDDEN_ACPI_TABLE 8
566#define TAINT_WARN 9
567#define TAINT_CRAP 10
568#define TAINT_FIRMWARE_WORKAROUND 11
569#define TAINT_OOT_MODULE 12
570#define TAINT_UNSIGNED_MODULE 13
571#define TAINT_SOFTLOCKUP 14
572#define TAINT_LIVEPATCH 15
573#define TAINT_AUX 16
574#define TAINT_RANDSTRUCT 17
575#define TAINT_FLAGS_COUNT 18
576
577struct taint_flag {
578 char c_true;
579 char c_false;
580 bool module;
581};
582
583extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
584
585extern const char hex_asc[];
586#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
587#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
588
/*
 * hex_byte_pack - write @byte as two lower-case hex digits at @buf
 *
 * Stores the high nibble first, then the low nibble, and returns the
 * buffer position just past the two characters written (buf + 2).
 * No NUL terminator is added.
 */
static inline char *hex_byte_pack(char *buf, u8 byte)
{
	buf[0] = hex_asc_hi(byte);
	buf[1] = hex_asc_lo(byte);
	return buf + 2;
}
595
596extern const char hex_asc_upper[];
597#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
598#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
599
/*
 * hex_byte_pack_upper - write @byte as two UPPER-case hex digits at @buf
 *
 * Same contract as hex_byte_pack(): high nibble first, then low nibble;
 * returns buf + 2. No NUL terminator is added.
 */
static inline char *hex_byte_pack_upper(char *buf, u8 byte)
{
	buf[0] = hex_asc_upper_hi(byte);
	buf[1] = hex_asc_upper_lo(byte);
	return buf + 2;
}
606
607extern int hex_to_bin(char ch);
608extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
609extern char *bin2hex(char *dst, const void *src, size_t count);
610
611bool mac_pton(const char *s, u8 *mac);
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633enum ftrace_dump_mode {
634 DUMP_NONE,
635 DUMP_ALL,
636 DUMP_ORIG,
637};
638
639#ifdef CONFIG_TRACING
640void tracing_on(void);
641void tracing_off(void);
642int tracing_is_on(void);
643void tracing_snapshot(void);
644void tracing_snapshot_alloc(void);
645
646extern void tracing_start(void);
647extern void tracing_stop(void);
648
/*
 * Compile-time-only sink used by __trace_printk_check_format(): it is
 * never executed (it is only called under "if (0)" below) and exists
 * purely so the __printf(1, 2) attribute makes the compiler type-check
 * trace_printk() format strings against their arguments.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
653#define __trace_printk_check_format(fmt, args...) \
654do { \
655 if (0) \
656 ____trace_printk_check_format(fmt, ##args); \
657} while (0)
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689#define trace_printk(fmt, ...) \
690do { \
691 char _______STR[] = __stringify((__VA_ARGS__)); \
692 if (sizeof(_______STR) > 3) \
693 do_trace_printk(fmt, ##__VA_ARGS__); \
694 else \
695 trace_puts(fmt); \
696} while (0)
697
698#define do_trace_printk(fmt, args...) \
699do { \
700 static const char *trace_printk_fmt __used \
701 __attribute__((section("__trace_printk_fmt"))) = \
702 __builtin_constant_p(fmt) ? fmt : NULL; \
703 \
704 __trace_printk_check_format(fmt, ##args); \
705 \
706 if (__builtin_constant_p(fmt)) \
707 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
708 else \
709 __trace_printk(_THIS_IP_, fmt, ##args); \
710} while (0)
711
712extern __printf(2, 3)
713int __trace_bprintk(unsigned long ip, const char *fmt, ...);
714
715extern __printf(2, 3)
716int __trace_printk(unsigned long ip, const char *fmt, ...);
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743#define trace_puts(str) ({ \
744 static const char *trace_printk_fmt __used \
745 __attribute__((section("__trace_printk_fmt"))) = \
746 __builtin_constant_p(str) ? str : NULL; \
747 \
748 if (__builtin_constant_p(str)) \
749 __trace_bputs(_THIS_IP_, trace_printk_fmt); \
750 else \
751 __trace_puts(_THIS_IP_, str, strlen(str)); \
752})
753extern int __trace_bputs(unsigned long ip, const char *str);
754extern int __trace_puts(unsigned long ip, const char *str, int size);
755
756extern void trace_dump_stack(int skip);
757
758
759
760
761
762
763#define ftrace_vprintk(fmt, vargs) \
764do { \
765 if (__builtin_constant_p(fmt)) { \
766 static const char *trace_printk_fmt __used \
767 __attribute__((section("__trace_printk_fmt"))) = \
768 __builtin_constant_p(fmt) ? fmt : NULL; \
769 \
770 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
771 } else \
772 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
773} while (0)
774
775extern __printf(2, 0) int
776__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
777
778extern __printf(2, 0) int
779__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
780
781extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
782#else
/*
 * !CONFIG_TRACING stubs: keep callers compiling when tracing is built
 * out. All of them are no-ops; the predicates report tracing as off.
 */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

/* Stub trace_printk(): swallows the format and arguments, returns 0.
 * Keeps the __printf attribute so format strings are still checked. */
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
/* Stub ftrace_vprintk(): accepts and ignores a va_list, returns 0. */
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
804#endif
805
806
807
808
809
810
811
812
813
814
815
816
817
818#define __typecheck(x, y) \
819 (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
820
821
822
823
824
825
826#define __is_constexpr(x) \
827 (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
828
829#define __no_side_effects(x, y) \
830 (__is_constexpr(x) && __is_constexpr(y))
831
832#define __safe_cmp(x, y) \
833 (__typecheck(x, y) && __no_side_effects(x, y))
834
835#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
836
837#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
838 typeof(x) unique_x = (x); \
839 typeof(y) unique_y = (y); \
840 __cmp(unique_x, unique_y, op); })
841
842#define __careful_cmp(x, y, op) \
843 __builtin_choose_expr(__safe_cmp(x, y), \
844 __cmp(x, y, op), \
845 __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
846
/*
 * min - return the smaller of two values
 *
 * Via __careful_cmp(): when both sides are side-effect-free constant
 * expressions of compatible type, expands to a plain (constant-foldable)
 * comparison; otherwise uses the single-evaluation __cmp_once() form.
 */
#define min(x, y)	__careful_cmp(x, y, <)

/*
 * max - return the larger of two values (same type/evaluation rules
 * as min()).
 */
#define max(x, y)	__careful_cmp(x, y, >)
860
861
862
863
864
865
866
867#define min3(x, y, z) min((typeof(x))min(x, y), z)
868
869
870
871
872
873
874
875#define max3(x, y, z) max((typeof(x))max(x, y), z)
876
877
878
879
880
881
882#define min_not_zero(x, y) ({ \
883 typeof(x) __x = (x); \
884 typeof(y) __y = (y); \
885 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
886
887
888
889
890
891
892
893
894
895
896#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
912
913
914
915
916
917
918
919#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
920
921
922
923
924
925
926
927
928
929
930
931#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
932
933
934
935
936
937
938
939
940
941
942
943
944#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
945
946
947
948
949
950
951
952#define swap(a, b) \
953 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
954
955
956#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
957#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
958
959#define __CONCAT(a, b) a ## b
960#define CONCATENATE(a, b) __CONCAT(a, b)
961
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * The BUILD_BUG_ON_MSG rejects, at compile time, a @ptr whose pointee
 * type matches neither the declared member type nor void.
 */
#define container_of(ptr, type, member) ({				\
	void *__mptr = (void *)(ptr);					\
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\
			 !__same_type(*(ptr), void),			\
			 "pointer type mismatch in container_of()");	\
	((type *)(__mptr - offsetof(type, member))); })
975
976
977
978
979
980
981
982
983
984#define container_of_safe(ptr, type, member) ({ \
985 void *__mptr = (void *)(ptr); \
986 BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
987 !__same_type(*(ptr), void), \
988 "pointer type mismatch in container_of()"); \
989 IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
990 ((type *)(__mptr - offsetof(type, member))); })
991
992
993#ifdef CONFIG_FTRACE_MCOUNT_RECORD
994# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
995#endif
996
997
998#define VERIFY_OCTAL_PERMISSIONS(perms) \
999 (BUILD_BUG_ON_ZERO((perms) < 0) + \
1000 BUILD_BUG_ON_ZERO((perms) > 0777) + \
1001 \
1002 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
1003 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
1004 \
1005 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
1006 \
1007 BUILD_BUG_ON_ZERO((perms) & 2) + \
1008 (perms))
1009#endif
1010