1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H
3
4
5#include <stdarg.h>
6#include <linux/linkage.h>
7#include <linux/stddef.h>
8#include <linux/types.h>
9#include <linux/compiler.h>
10#include <linux/bitops.h>
11#include <linux/log2.h>
12#include <linux/typecheck.h>
13#include <linux/printk.h>
14#include <linux/dynamic_debug.h>
15#include <asm/byteorder.h>
16#include <uapi/linux/kernel.h>
17
/* Limits for the classic C integer types, expressed via bit tricks so they
 * are correct regardless of the architecture's word size. */
#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

/* Limits for the kernel's fixed-width typedefs (u8/s8 ... u64/s64). */
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))

/* Sentinel written at the end of a kernel stack to detect overruns. */
#define STACK_MAGIC 0xdeadbeef

/* Replicate the byte value @x into every byte of an unsigned long
 * (e.g. REPEAT_BYTE(0x2f) == 0x2f2f2f2f2f2f2f2f on 64-bit). */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* Round @x up to the next multiple of @a; @a must be a power of two.
 * PTR_ALIGN does the same for pointers, preserving the pointer type. */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

/* Number of elements in array @arr; the __must_be_array() term makes this
 * a build error when @arr is actually a pointer. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55
56
57
58
59
60
61
/* round_up/round_down - round @x up/down to a power-of-two @y.
 * NOTE: @y must be a power of two; non-power-of-two values give wrong
 * results (use roundup()/rounddown() below for the general case). */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof a single member @f of struct type @t, without needing an object. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Integer ceiling division; @n and @d are evaluated more than once. */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* Ceiling division for 64-bit dividends; uses do_div() so it also works
 * on 32-bit architectures without 64-bit hardware division. */
#define DIV_ROUND_UP_ULL(ll,d) \
 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif


/* Round @x up to the next multiple of @y; works for any @y, not just
 * powers of two. @x is evaluated twice. */
#define roundup(x, y) ( \
{ \
 const typeof(y) __y = y; \
 (((x) + (__y - 1)) / __y) * __y; \
} \
)
/* Round @x down to the previous multiple of @y; any @y. @y is evaluated
 * twice. */
#define rounddown(x, y) ( \
{ \
 typeof(x) __x = (x); \
 __x - (__x % (y)); \
} \
)
90
91
92
93
94
95
/*
 * DIV_ROUND_CLOSEST - divide rounding to the nearest integer.
 * The compile-time (typeof(x))-1 > 0 tests detect unsigned operands, so
 * the negative-dividend adjustment branch is only taken for genuinely
 * negative signed values; for unsigned types the comparison folds away.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
 typeof(x) __x = x; \
 typeof(divisor) __d = divisor; \
 (((typeof(x))-1) > 0 || \
 ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
 (((__x) + ((__d) / 2)) / (__d)) : \
 (((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * DIV_ROUND_CLOSEST_ULL - nearest-integer division for unsigned 64-bit
 * dividends, via do_div() so it is usable on 32-bit architectures.
 * Only valid for non-negative values.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
 typeof(divisor) __d = divisor; \
 unsigned long long _tmp = (x) + (__d) / 2; \
 do_div(_tmp, __d); \
 _tmp; \
} \
)

/*
 * mult_frac - compute (x * numer / denom) without the intermediate
 * overflow that a naive x*numer would risk: split @x into quotient and
 * remainder by @denom first.
 */
#define mult_frac(x, numer, denom)( \
{ \
 typeof(x) quot = (x) / (denom); \
 typeof(x) rem = (x) % (denom); \
 (quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
130
131
/* _RET_IP_: caller's return address; _THIS_IP_: address of the current
 * instruction, obtained via a local label (GCC extension). */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

/* sector_div(n, b): divide sector count @n by @b in place and return the
 * remainder. With CONFIG_LBDAF sector_t is 64-bit and do_div() is needed;
 * otherwise plain 32-bit division suffices. */
#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
 int _res; \
 _res = (n) % (b); \
 (n) /= (b); \
 _res; \
} \
)
#endif

/* upper_32_bits - high 32 bits of @n. The double 16-bit shift avoids a
 * ">> 32" warning/UB when @n itself is only 32 bits wide. */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/* lower_32_bits - low 32 bits of @n. */
#define lower_32_bits(n) ((u32)(n))
164
/* Forward declarations so prototypes below need not pull in more headers. */
struct completion;
struct pt_regs;
struct user;

/* might_resched(): a voluntary preemption point; a no-op unless
 * CONFIG_PREEMPT_VOLUNTARY is enabled. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 void ___might_sleep(const char *file, int line, int preempt_offset);
 void __might_sleep(const char *file, int line, int preempt_offset);

/*
 * might_sleep - annotation for functions that can sleep
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP this prints a stack trace if the macro
 * is executed in an atomic context (spinlock held, IRQs disabled, ...).
 * It is also a voluntary preemption point (might_resched()), so heavy
 * non-sleeping loops may use it to reduce scheduling latency.
 */
# define might_sleep() \
 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 static inline void ___might_sleep(const char *file, int line,
 int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
 int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form: only annotate/preempt when @cond is true. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
202
203
204
205
206
207
208
/*
 * abs - absolute value of @x, returned as a long.
 * Picks long vs int arithmetic from sizeof(x) so 64-bit longs are not
 * truncated on 64-bit kernels. NOTE(review): semantics for unsigned or
 * 64-bit-on-32-bit arguments follow from the sizeof dispatch only — use
 * abs64() for s64 values.
 */
#define abs(x) ({ \
 long ret; \
 if (sizeof(x) == sizeof(long)) { \
 long __x = (x); \
 ret = (__x < 0) ? -__x : __x; \
 } else { \
 int __x = (x); \
 ret = (__x < 0) ? -__x : __x; \
 } \
 ret; \
 })

/* abs64 - absolute value of an s64, evaluated in 64-bit arithmetic. */
#define abs64(x) ({ \
 s64 __x = (x); \
 (__x < 0) ? -__x : __x; \
 })
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
241{
242 return (u32)(((u64) val * ep_ro) >> 32);
243}
244
/* might_fault(): annotation for code that may take a page fault (and hence
 * sleep); only instrumented under the locking/sleep debug options. */
#if defined(CONFIG_MMU) && \
 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
void might_fault(void);
#else
static inline void might_fault(void) { }
#endif

/* Panic/oops infrastructure. panic() never returns. */
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...)
 __noreturn __cold;
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code)
 __noreturn;
void complete_and_exit(struct completion *, long)
 __noreturn;
265
266
/* Internal kstrto* workers; prefer the inline kstrtoul()/kstrtol() wrappers
 * below. All kstrto* return 0 on success or a negative errno. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);

int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
290{
291
292
293
294
295 if (sizeof(unsigned long) == sizeof(unsigned long long) &&
296 __alignof__(unsigned long) == __alignof__(unsigned long long))
297 return kstrtoull(s, base, (unsigned long long *)res);
298 else
299 return _kstrtoul(s, base, res);
300}
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
319{
320
321
322
323
324 if (sizeof(long) == sizeof(long long) &&
325 __alignof__(long) == __alignof__(long long))
326 return kstrtoll(s, base, (long long *)res);
327 else
328 return _kstrtol(s, base, res);
329}
330
int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __must_check kstrtoint(const char *s, unsigned int base, int *res);

/* Fixed-width aliases: u64/s64/u32/s32 are layout-identical to the types
 * the underlying parsers take, so these are zero-cost wrappers. */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
 return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
 return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
 return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
 return kstrtoint(s, base, res);
}
353
int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);

/* *_from_user variants copy at most @count bytes from userspace before
 * converting; same return conventions as the kernel-buffer versions. */
int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
369
/* Fixed-width aliases of the *_from_user parsers, matching the kernel-buffer
 * wrappers above. */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
 return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
 return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
 return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
 return kstrtoint_from_user(s, count, base, res);
}
389
390
391
/* Legacy string-to-number parsers; new code should use kstrto*() which
 * report errors instead of silently stopping at the first bad digit. */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);

extern int num_to_str(char *buf, int size, unsigned long long num);

/* Kernel printf family. The s*printf variants return the length that
 * WOULD have been written; scnprintf/vscnprintf return what actually was. */
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
int snprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(3, 4)
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
/* kasprintf/kvasprintf allocate the result with @gfp; caller frees. */
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);

extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);

/* Boot-time option string parsing helpers. */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);

/* Kernel-text address classification (used by backtracers etc.). */
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);

unsigned long int_sqrt(unsigned long);

/* Panic policy knobs, mostly exposed via sysctl. */
extern void bust_spinlocks(int yes);
extern int oops_in_progress;
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_stackoverflow;
442
443
444
445static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
446{
447 if (panic_timeout == arch_default_timeout)
448 panic_timeout = timeout;
449}
extern const char *print_tainted(void);
/* Tells add_taint() whether lockdep results are still trustworthy after
 * this taint event. */
enum lockdep_ok {
 LOCKDEP_STILL_OK,
 LOCKDEP_NOW_UNRELIABLE
};
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;

extern bool early_boot_irqs_disabled;

/* Coarse global boot/shutdown phase; values are ordered, so comparisons
 * like "system_state < SYSTEM_RUNNING" are meaningful. */
extern enum system_states {
 SYSTEM_BOOTING,
 SYSTEM_RUNNING,
 SYSTEM_HALT,
 SYSTEM_POWER_OFF,
 SYSTEM_RESTART,
} system_state;
470
/* Taint flag bit numbers, reported by print_tainted() in oops output.
 * These are ABI: values must not be renumbered. */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
487
/* Lowercase hex digit lookup table ("0123456789abcdef") and accessors for
 * the low/high nibble of a byte. */
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
492static inline char *hex_byte_pack(char *buf, u8 byte)
493{
494 *buf++ = hex_asc_hi(byte);
495 *buf++ = hex_asc_lo(byte);
496 return buf;
497}
498
/* Uppercase counterpart of hex_asc ("0123456789ABCDEF") with matching
 * nibble accessors. */
extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
503static inline char *hex_byte_pack_upper(char *buf, u8 byte)
504{
505 *buf++ = hex_asc_upper_hi(byte);
506 *buf++ = hex_asc_upper_lo(byte);
507 return buf;
508}
509
/* ASCII-hex <-> binary converters. hex_to_bin() returns -1 on a non-hex
 * character; hex2bin() fails (non-zero) if any input byte is invalid. */
extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);

/* Parse a MAC address of the form "xx:xx:xx:xx:xx:xx" into @mac. */
bool mac_pton(const char *s, u8 *mac);
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
#ifdef CONFIG_RING_BUFFER
/* Permanently disable ring-buffer recording; cannot be re-enabled. */
void tracing_off_permanent(void);
#else
static inline void tracing_off_permanent(void) { }
#endif

/* How much of the trace buffers ftrace_dump() should emit. */
enum ftrace_dump_mode {
 DUMP_NONE,
 DUMP_ALL,
 DUMP_ORIG,
};
547
#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

/* Never called at runtime; exists only so the compiler type-checks
 * trace_printk() format strings via the __printf attribute. */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
/* The if (0) keeps the call dead while still letting -Wformat fire. */
#define __trace_printk_check_format(fmt, args...) \
do { \
 if (0) \
 ____trace_printk_check_format(fmt, ##args); \
} while (0)
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
/*
 * trace_printk - printf into the ftrace ring buffer (debugging only)
 *
 * Stringifying __VA_ARGS__ yields "()" (3 bytes incl. NUL) when no
 * arguments were passed; in that case the cheaper trace_puts() path is
 * taken. Constant format strings are placed in the __trace_printk_fmt
 * section so only a pointer needs to be logged (bprintk path).
 */
#define trace_printk(fmt, ...) \
do { \
 char _______STR[] = __stringify((__VA_ARGS__)); \
 if (sizeof(_______STR) > 3) \
 do_trace_printk(fmt, ##__VA_ARGS__); \
 else \
 trace_puts(fmt); \
} while (0)

#define do_trace_printk(fmt, args...) \
do { \
 static const char *trace_printk_fmt \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __trace_printk_check_format(fmt, ##args); \
 \
 if (__builtin_constant_p(fmt)) \
 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
 else \
 __trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)

extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
/*
 * trace_puts - write a plain string to the ftrace ring buffer
 *
 * Evaluates to the number of bytes written. Like do_trace_printk(), a
 * compile-time-constant string is registered in the __trace_printk_fmt
 * section and logged by pointer (__trace_bputs); runtime strings are
 * copied with their length (__trace_puts).
 */
#define trace_puts(str) ({ \
 static const char *trace_printk_fmt \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(str) ? str : NULL; \
 \
 if (__builtin_constant_p(str)) \
 __trace_bputs(_THIS_IP_, trace_printk_fmt); \
 else \
 __trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

/* Record a stack trace in the ring buffer, skipping @skip frames. */
extern void trace_dump_stack(int skip);

/* va_list flavour of trace_printk(); same constant-format optimization. */
#define ftrace_vprintk(fmt, vargs) \
do { \
 if (__builtin_constant_p(fmt)) { \
 static const char *trace_printk_fmt \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
 } else \
 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)

extern int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
/* !CONFIG_TRACING: every tracing entry point collapses to a no-op so
 * callers need no #ifdefs of their own. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
 return 0;
}
static inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
 return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif
714
715
716
717
718
719
/*
 * min/max - type-safe minimum/maximum with single evaluation.
 * The (void)(&_min1 == &_min2) pointer comparison produces a compiler
 * warning when @x and @y have different types.
 */
#define min(x, y) ({ \
 typeof(x) _min1 = (x); \
 typeof(y) _min2 = (y); \
 (void) (&_min1 == &_min2); \
 _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({ \
 typeof(x) _max1 = (x); \
 typeof(y) _max2 = (y); \
 (void) (&_max1 == &_max2); \
 _max1 > _max2 ? _max1 : _max2; })

/* Three-way variants; the inner result is cast to typeof(x) first. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)
734
735
736
737
738
739
/*
 * min_not_zero - minimum of two values, ignoring zeros.
 * Returns the non-zero operand when exactly one is zero (zero only when
 * both are).
 */
#define min_not_zero(x, y) ({ \
 typeof(x) __x = (x); \
 typeof(y) __y = (y); \
 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/*
 * clamp - bound @val to the range [@lo, @hi], with the same strict type
 * checking as min()/max().
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
755
756
757
758
759
760
761
/*
 * min_t/max_t - min/max after converting both operands to @type.
 * Use these when the operands deliberately have different types.
 */
#define min_t(type, x, y) ({ \
 type __min1 = (x); \
 type __min2 = (y); \
 __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({ \
 type __max1 = (x); \
 type __max2 = (y); \
 __max1 > __max2 ? __max1: __max2; })

/*
 * clamp_t - clamp @val to [@lo, @hi] with all three operands converted
 * to @type first.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
783
784
785
786
787
788
789
790
791
792
793
794
/* clamp_val - clamp with @lo/@hi converted to typeof(@val); useful when
 * the bounds are constants whose natural type differs from @val's. */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/* Exchange the values of @a and @b (must be assignment-compatible). */
#define swap(a, b) \
 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/*
 * container_of - cast a member pointer out to the containing structure
 * @ptr:    pointer to the member
 * @type:   type of the structure @ptr is embedded in
 * @member: name of the member within @type
 *
 * The intermediate __mptr assignment type-checks @ptr against the
 * member's real type before the offset arithmetic.
 */
#define container_of(ptr, type, member) ({ \
 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
 (type *)( (char *)__mptr - offsetof(type,member) );})
814
815
/* Marker that forces a rebuild when the mcount-record option toggles. */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

/*
 * VERIFY_OCTAL_PERMISSIONS - compile-time sanity check of a mode constant.
 * Evaluates to @perms, but breaks the build if the value is out of range,
 * if user perms are weaker than group or group weaker than other (a
 * near-certain sign of a decimal/octal mixup), or if the mode is
 * other-writable.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
 (BUILD_BUG_ON_ZERO((perms) < 0) + \
 BUILD_BUG_ON_ZERO((perms) > 0777) + \
 \
 BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
 BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
 \
 BUILD_BUG_ON_ZERO((perms) & 2) + \
 (perms))
#endif
831