1
2#ifndef _LINUX_KERNEL_H
3#define _LINUX_KERNEL_H
4
5
6#include <stdarg.h>
7#include <linux/limits.h>
8#include <linux/linkage.h>
9#include <linux/stddef.h>
10#include <linux/types.h>
11#include <linux/compiler.h>
12#include <linux/bitops.h>
13#include <linux/log2.h>
14#include <linux/typecheck.h>
15#include <linux/printk.h>
16#include <linux/build_bug.h>
17#include <asm/byteorder.h>
18#include <asm/div64.h>
19#include <uapi/linux/kernel.h>
20
/* Canary value used to detect kernel stack overruns. */
#define STACK_MAGIC 0xdeadbeef

/**
 * REPEAT_BYTE - repeat the value @x in each byte of an unsigned long
 * @x: value to repeat
 *
 * NOTE: @x is not checked for > 0xff; larger values produce garbage.
 */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
37
38
/* Generic data direction definitions. */
#define READ 0
#define WRITE 1

/**
 * ARRAY_SIZE - get the number of elements in array @arr
 * @arr: array to be sized
 *
 * __must_be_array() produces a build error when @arr is a pointer,
 * which would otherwise silently yield a wrong count.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/* Convert a u64 (type-checked) into a user-space pointer. */
#define u64_to_user_ptr(x) ( \
{ \
	typecheck(u64, (x)); \
	(void __user *)(uintptr_t)(x); \
} \
)
54
55
56
57
58
59
60
/*
 * The mask is cast to the type of @x so that the ~ in round_down()
 * is as wide as the result.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))

/**
 * round_up - round up to next specified power of 2
 * @x: the value to round
 * @y: multiple to round up to (must be a power of 2)
 *
 * Rounds @x up to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding up, use roundup() below.
 */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)

/**
 * round_down - round down to next specified power of 2
 * @x: the value to round
 * @y: multiple to round down to (must be a power of 2)
 *
 * Rounds @x down to next multiple of @y (which must be a power of 2).
 * To perform arbitrary rounding down, use rounddown() below.
 */
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* Size of struct member @f of type @t without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

/* 64-bit divide helpers; do_div() is used because plain 64/32 division
 * is not available on all 32-bit architectures. */
#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))

#if BITS_PER_LONG == 32
/* sector_t may be 64-bit on a 32-bit kernel, so use the ULL variant. */
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
102
103
104
105
106
107
108
109
110
/**
 * roundup - round up to the next specified multiple
 * @x: the value to round
 * @y: multiple to round up to
 *
 * Rounds @x up to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_up().
 */
#define roundup(x, y) ( \
{ \
	typeof(y) __y = y; \
	(((x) + (__y - 1)) / __y) * __y; \
} \
)
/**
 * rounddown - round down to next specified multiple
 * @x: the value to round
 * @y: multiple to round down to
 *
 * Rounds @x down to next multiple of @y. If @y will always be a power
 * of 2, consider using the faster round_down().
 */
#define rounddown(x, y) ( \
{ \
	typeof(x) __x = (x); \
	__x - (__x % (y)); \
} \
)

/*
 * Divide positive or negative dividend by positive or negative divisor
 * and round to closest integer. Result is undefined for negative
 * divisors if the dividend variable type is unsigned and for negative
 * dividends if the divisor variable type is unsigned.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
	typeof(x) __x = x; \
	typeof(divisor) __d = divisor; \
	(((typeof(x))-1) > 0 || \
	((typeof(divisor))-1) > 0 || \
	(((__x) > 0) == ((__d) > 0))) ? \
	(((__x) + ((__d) / 2)) / (__d)) : \
	(((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * Same as above but for u64 dividends. divisor must be a 32-bit
 * number.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
	typeof(divisor) __d = divisor; \
	unsigned long long _tmp = (x) + (__d) / 2; \
	do_div(_tmp, __d); \
	_tmp; \
} \
)

/*
 * Multiplies an integer by a fraction, while avoiding unnecessary
 * overflow or loss of precision: compute the quotient first, then add
 * the scaled remainder.
 */
#define mult_frac(x, numer, denom)( \
{ \
	typeof(x) quot = (x) / (denom); \
	typeof(x) rem = (x) % (denom); \
	(quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
173
174
/* Caller's return address / address of the current instruction. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# define sector_div(a, b) do_div(a, b)
#else
/* Without large-block-device support, sector_t fits in 32 bits and a
 * plain divide suffices; evaluates to the remainder like do_div(). */
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
206
207struct completion;
208struct pt_regs;
209struct user;
210
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void ___might_sleep(const char *file, int line, int preempt_offset);
extern void __might_sleep(const char *file, int line, int preempt_offset);
extern void __cant_sleep(const char *file, int line, int preempt_offset);

/**
 * might_sleep - annotation for functions that can sleep
 *
 * This macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...).
 *
 * This is a useful debugging help to be able to catch problems early and not
 * be bitten later when the calling function happens to sleep when it is not
 * supposed to.
 */
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
/**
 * cant_sleep - annotation for functions that cannot sleep
 *
 * This macro will print a stack trace if it is executed with preemption enabled
 */
# define cant_sleep() \
	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
  static inline void ___might_sleep(const char *file, int line,
				   int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
				   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
254
255
256
257
258
259
260
261
262
/**
 * abs - return absolute value of an argument
 * @x: the value.  If it is unsigned type, it is converted to signed type first.
 *     char is treated as if it was signed (regardless of whether it really is)
 *     but the macro's return type is preserved as char.
 *
 * Return: an absolute value of x.
 */
#define abs(x) __abs_choose_expr(x, long long, \
		__abs_choose_expr(x, long, \
		__abs_choose_expr(x, int, \
		__abs_choose_expr(x, short, \
		__abs_choose_expr(x, char, \
		__builtin_choose_expr( \
			__builtin_types_compatible_p(typeof(x), char), \
			(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
			((void)0)))))))

/* Helper for abs(): selects the branch whose signed/unsigned @type
 * matches typeof(x), otherwise falls through to @other. */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), signed type) || \
	__builtin_types_compatible_p(typeof(x), unsigned type), \
	({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
293{
294 return (u32)(((u64) val * ep_ro) >> 32);
295}
296
/* might_fault(): annotate code that may sleep on a page fault; only
 * checked when lock/atomic-sleep debugging is enabled on an MMU kernel,
 * otherwise a no-op. */
#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
304
305extern struct atomic_notifier_head panic_notifier_list;
306extern long (*panic_blink)(int state);
307__printf(1, 2)
308void panic(const char *fmt, ...) __noreturn __cold;
309void nmi_panic(struct pt_regs *regs, const char *msg);
310extern void oops_enter(void);
311extern void oops_exit(void);
312void print_oops_end_marker(void);
313extern int oops_may_print(void);
314void do_exit(long error_code) __noreturn;
315void complete_and_exit(struct completion *, long) __noreturn;
316
317#ifdef CONFIG_ARCH_HAS_REFCOUNT
318void refcount_error_report(struct pt_regs *regs, const char *err);
319#else
320static inline void refcount_error_report(struct pt_regs *regs, const char *err)
321{ }
322#endif
323
324
325int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
326int __must_check _kstrtol(const char *s, unsigned int base, long *res);
327
328int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
329int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
/**
 * kstrtoul - convert a string to an unsigned long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign, but not a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * We want to shortcut the function call when the types are
	 * layout-compatible, but
	 * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0,
	 * so compare size and alignment instead.
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
/**
 * kstrtol - convert a string to a long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign or a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/*
	 * Shortcut the function call when long and long long share
	 * size and alignment (see kstrtoul() for why this is not done
	 * with __builtin_types_compatible_p).
	 */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
388
389int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
390int __must_check kstrtoint(const char *s, unsigned int base, int *res);
391
/* u64 is always unsigned long long in the kernel, so forward directly. */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

/* s64 is always long long, so forward directly. */
static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

/* u32 is always unsigned int, so forward directly. */
static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

/* s32 is always int, so forward directly. */
static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
411
412int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
413int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
414int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
415int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
416int __must_check kstrtobool(const char *s, bool *res);
417
418int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
419int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
420int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
421int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
422int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
423int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
424int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
425int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
426int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
427int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
428int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
429
/* Fixed-width wrappers around the *_from_user parsers declared above;
 * each forwards to the parser for the matching native type. */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
449
450
451
452extern unsigned long simple_strtoul(const char *,char **,unsigned int);
453extern long simple_strtol(const char *,char **,unsigned int);
454extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
455extern long long simple_strtoll(const char *,char **,unsigned int);
456
457extern int num_to_str(char *buf, int size,
458 unsigned long long num, unsigned int width);
459
460
461
462extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
463extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
464extern __printf(3, 4)
465int snprintf(char *buf, size_t size, const char *fmt, ...);
466extern __printf(3, 0)
467int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
468extern __printf(3, 4)
469int scnprintf(char *buf, size_t size, const char *fmt, ...);
470extern __printf(3, 0)
471int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
472extern __printf(2, 3) __malloc
473char *kasprintf(gfp_t gfp, const char *fmt, ...);
474extern __printf(2, 0) __malloc
475char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
476extern __printf(2, 0)
477const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
478
479extern __scanf(2, 3)
480int sscanf(const char *, const char *, ...);
481extern __scanf(2, 0)
482int vsscanf(const char *, const char *, va_list);
483
484extern int get_option(char **str, int *pint);
485extern char *get_options(const char *str, int nints, int *ints);
486extern unsigned long long memparse(const char *ptr, char **retptr);
487extern bool parse_option_str(const char *str, const char *option);
488extern char *next_arg(char *args, char **param, char **val);
489
490extern int core_kernel_text(unsigned long addr);
491extern int init_kernel_text(unsigned long addr);
492extern int core_kernel_data(unsigned long addr);
493extern int __kernel_text_address(unsigned long addr);
494extern int kernel_text_address(unsigned long addr);
495extern int func_ptr_is_kernel_text(void *ptr);
496
unsigned long int_sqrt(unsigned long);

#if BITS_PER_LONG < 64
u32 int_sqrt64(u64 x);
#else
/* On 64-bit, unsigned long already holds a u64, so int_sqrt() suffices. */
static inline u32 int_sqrt64(u64 x)
{
	return (u32)int_sqrt(x);
}
#endif
507
508extern void bust_spinlocks(int yes);
509extern int oops_in_progress;
510extern int panic_timeout;
511extern unsigned long panic_print;
512extern int panic_on_oops;
513extern int panic_on_unrecovered_nmi;
514extern int panic_on_io_nmi;
515extern int panic_on_warn;
516extern int sysctl_panic_on_rcu_stall;
517extern int sysctl_panic_on_stackoverflow;
518
519extern bool crash_kexec_post_notifiers;
520
521
522
523
524
525
526extern atomic_t panic_cpu;
527#define PANIC_CPU_INVALID -1
528
529
530
531
532
533static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
534{
535 if (panic_timeout == arch_default_timeout)
536 panic_timeout = timeout;
537}
538extern const char *print_tainted(void);
539enum lockdep_ok {
540 LOCKDEP_STILL_OK,
541 LOCKDEP_NOW_UNRELIABLE
542};
543extern void add_taint(unsigned flag, enum lockdep_ok);
544extern int test_taint(unsigned flag);
545extern unsigned long get_taint(void);
546extern int root_mountflags;
547
548extern bool early_boot_irqs_disabled;
549
550
551
552
553
554extern enum system_states {
555 SYSTEM_BOOTING,
556 SYSTEM_SCHEDULING,
557 SYSTEM_RUNNING,
558 SYSTEM_HALT,
559 SYSTEM_POWER_OFF,
560 SYSTEM_RESTART,
561 SYSTEM_SUSPEND,
562} system_state;
563
564
565#define TAINT_PROPRIETARY_MODULE 0
566#define TAINT_FORCED_MODULE 1
567#define TAINT_CPU_OUT_OF_SPEC 2
568#define TAINT_FORCED_RMMOD 3
569#define TAINT_MACHINE_CHECK 4
570#define TAINT_BAD_PAGE 5
571#define TAINT_USER 6
572#define TAINT_DIE 7
573#define TAINT_OVERRIDDEN_ACPI_TABLE 8
574#define TAINT_WARN 9
575#define TAINT_CRAP 10
576#define TAINT_FIRMWARE_WORKAROUND 11
577#define TAINT_OOT_MODULE 12
578#define TAINT_UNSIGNED_MODULE 13
579#define TAINT_SOFTLOCKUP 14
580#define TAINT_LIVEPATCH 15
581#define TAINT_AUX 16
582#define TAINT_RANDSTRUCT 17
583#define TAINT_FLAGS_COUNT 18
584
585struct taint_flag {
586 char c_true;
587 char c_false;
588 bool module;
589};
590
591extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
592
593extern const char hex_asc[];
594#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
595#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
596
597static inline char *hex_byte_pack(char *buf, u8 byte)
598{
599 *buf++ = hex_asc_hi(byte);
600 *buf++ = hex_asc_lo(byte);
601 return buf;
602}
603
604extern const char hex_asc_upper[];
605#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
606#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
607
608static inline char *hex_byte_pack_upper(char *buf, u8 byte)
609{
610 *buf++ = hex_asc_upper_hi(byte);
611 *buf++ = hex_asc_upper_lo(byte);
612 return buf;
613}
614
615extern int hex_to_bin(char ch);
616extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
617extern char *bin2hex(char *dst, const void *src, size_t count);
618
619bool mac_pton(const char *s, u8 *mac);
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
/* How much of the ftrace buffers to print on oops: nothing, all CPUs,
 * or only the CPU that triggered the dump. */
enum ftrace_dump_mode {
	DUMP_NONE,
	DUMP_ALL,
	DUMP_ORIG,
};

#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

/* Never actually called: exists only so the compiler format-checks
 * trace_printk() arguments against @fmt via the __printf attribute. */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
	if (0) \
		____trace_printk_check_format(fmt, ##args); \
} while (0)
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
/**
 * trace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __trace_printk is an internal function for trace_printk() and
 *       the @ip is passed in via the trace_printk() macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering in various
 * printk like tracing in the code, a developer can quickly see
 * where problems are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_printks scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_printk() is used.)
 *
 * A little optimization trick is done here. If there's only one
 * argument, there's no need to scan the string for printf formats.
 * __stringify((__VA_ARGS__)) turns into "()\0" with a size of 3 when
 * there are no args, anything else will be larger. So if the size is
 * bigger than 3 use do_trace_printk(), otherwise optimize the call to
 * trace_puts() and let gcc optimize the rest.
 */
#define trace_printk(fmt, ...) \
do { \
	char _______STR[] = __stringify((__VA_ARGS__)); \
	if (sizeof(_______STR) > 3) \
		do_trace_printk(fmt, ##__VA_ARGS__); \
	else \
		trace_puts(fmt); \
} while (0)

/* Emit a trace entry; constant formats are registered in the
 * __trace_printk_fmt section and use the cheaper binary path. */
#define do_trace_printk(fmt, args...) \
do { \
	static const char *trace_printk_fmt __used \
	__attribute__((section("__trace_printk_fmt"))) = \
	__builtin_constant_p(fmt) ? fmt : NULL; \
	\
	__trace_printk_check_format(fmt, ##args); \
	\
	if (__builtin_constant_p(fmt)) \
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
	else \
		__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)

extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
/**
 * trace_puts - write a string into the ftrace buffer
 * @str: the string to record
 *
 * Note: __trace_bputs is an internal function for trace_puts and
 *       the @ip is passed in via the trace_puts macro.
 *
 * This is similar to trace_printk() but is made for those really fast
 * paths that a developer wants the least amount of "Heisenbug" effects,
 * where the processing of the print format is still too much.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_puts scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_puts() is used.)
 *
 * Returns: 0 if nothing was written, positive # if string was.
 *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
 */
#define trace_puts(str) ({ \
	static const char *trace_printk_fmt __used \
	__attribute__((section("__trace_printk_fmt"))) = \
	__builtin_constant_p(str) ? str : NULL; \
	\
	if (__builtin_constant_p(str)) \
		__trace_bputs(_THIS_IP_, trace_printk_fmt); \
	else \
		__trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

extern void trace_dump_stack(int skip);
765
766
767
768
769
770
/*
 * vprintf-style counterpart of trace_printk().  The double
 * __builtin_constant_p is because gcc will give us an error if we try to
 * allocate the static variable to fmt if it is not a constant, even
 * with the outer if statement.
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
	if (__builtin_constant_p(fmt)) { \
		static const char *trace_printk_fmt __used \
		__attribute__((section("__trace_printk_fmt"))) = \
		__builtin_constant_p(fmt) ? fmt : NULL; \
		\
		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
	} else \
		__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)

extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
790#else
/* !CONFIG_TRACING: all tracing helpers compile away to no-ops. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
812#endif
813
814
815
816
817
818
819
820
821
822
823
824
825
/*
 * min()/max()/clamp() machinery.  The goals are:
 * - avoid multiple evaluations of the arguments (so side-effects like
 *   "x++" happen only once) when the arguments are non-constant;
 * - perform strict type-checking (the "unnecessary" pointer comparison
 *   below generates a warning on mismatched types);
 * - retain a constant expression as the result when possible.
 */
#define __typecheck(x, y) \
	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))

/*
 * This returns a constant expression while determining if an argument is
 * a constant expression, most importantly without evaluating the argument.
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

#define __no_side_effects(x, y) \
	(__is_constexpr(x) && __is_constexpr(y))

#define __safe_cmp(x, y) \
	(__typecheck(x, y) && __no_side_effects(x, y))

#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))

/* Evaluate each argument exactly once via unique temporaries. */
#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
	typeof(x) unique_x = (x); \
	typeof(y) unique_y = (y); \
	__cmp(unique_x, unique_y, op); })

/* Constant-foldable comparison when safe, single-evaluation otherwise. */
#define __careful_cmp(x, y, op) \
	__builtin_choose_expr(__safe_cmp(x, y), \
		__cmp(x, y, op), \
		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
854
855
856
857
858
859
/**
 * min - return minimum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define min(x, y) __careful_cmp(x, y, <)

/**
 * max - return maximum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define max(x, y) __careful_cmp(x, y, >)

/**
 * min3 - return minimum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define min3(x, y, z) min((typeof(x))min(x, y), z)

/**
 * max3 - return maximum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of @lo/@hi to make sure they are of the
 * same type as @val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * ..and if you can't take the strict types, you can specify one yourself.
 * Or not use min/max/clamp at all, of course.
 */

/**
 * min_t - return minimum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)

/**
 * max_t - return maximum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * @type to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument @val is.  This is useful when @val is an unsigned
 * type and @lo and @hi are literals that would otherwise get promoted.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
962
963
/* Count variadic arguments, up to 12; COUNT_ARGS() with no args is 0. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* Token-pasting helpers; CONCATENATE expands its arguments first. */
#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({ \
	void *__mptr = (void *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
	!__same_type(*(ptr), void), \
	"pointer type mismatch in container_of()"); \
	((type *)(__mptr - offsetof(type, member))); })

/**
 * container_of_safe - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
 */
#define container_of_safe(ptr, type, member) ({ \
	void *__mptr = (void *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
	!__same_type(*(ptr), void), \
	"pointer type mismatch in container_of()"); \
	IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
	((type *)(__mptr - offsetof(type, member))); })
999
1000
1001#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1002# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
1003#endif
1004
1005
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
	(BUILD_BUG_ON_ZERO((perms) < 0) + \
	BUILD_BUG_ON_ZERO((perms) > 0777) + \
	/* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
	BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
	BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
	/* USER_WRITABLE >= GROUP_WRITABLE */ \
	BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
	/* OTHER_WRITABLE?  Generally considered a bad idea. */ \
	BUILD_BUG_ON_ZERO((perms) & 2) + \
	(perms))
1017#endif
1018