1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H
3
4
5#include <stdarg.h>
6#include <linux/linkage.h>
7#include <linux/stddef.h>
8#include <linux/types.h>
9#include <linux/compiler.h>
10#include <linux/bitops.h>
11#include <linux/log2.h>
12#include <linux/typecheck.h>
13#include <linux/printk.h>
14#include <linux/dynamic_debug.h>
15#include <asm/byteorder.h>
16#include <uapi/linux/kernel.h>
17
/* Limits of the native and kernel fixed-width integer types. */
#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))

#define STACK_MAGIC 0xdeadbeef

/* Replicate the low byte of @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* Alignment helpers; @a must be a power of two. */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

/*
 * Number of elements in @arr.  __must_be_array() forces a build error
 * when a pointer is passed instead of an actual array.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55
56
57
58
59
60
61
/*
 * round_up()/round_down() round @x to a multiple of @y, where @y must be
 * a power of two (mask trick, no division).
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* Size of member @f of struct/union type @t, without needing an object. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Integer division with the quotient rounded up. */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* 64-bit dividend variant, using do_div() so it works on 32-bit platforms. */
#define DIV_ROUND_UP_ULL(ll,d) \
	({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif

/* Round @x up to a multiple of @y; works for any @y but costs a divide. */
#define roundup(x, y) ( \
{ \
	const typeof(y) __y = y; \
	(((x) + (__y - 1)) / __y) * __y; \
} \
)
/* Round @x down to a multiple of @y (any @y, costs a modulo). */
#define rounddown(x, y) ( \
{ \
	typeof(x) __x = (x); \
	__x - (__x % (y)); \
} \
)
90
91
92
93
94
95
/*
 * Divide positive or negative dividend by positive divisor and round
 * to closest integer.  Result is undefined for negative divisors and
 * for negative dividends if the divisor variable type is unsigned.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
	typeof(x) __x = x; \
	typeof(divisor) __d = divisor; \
	(((typeof(x))-1) > 0 || \
	 ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
		(((__x) + ((__d) / 2)) / (__d)) : \
		(((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * Same as above but for u64 dividends.  divisor must be a 32-bit
 * number (do_div() requirement).
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
	typeof(divisor) __d = divisor; \
	unsigned long long _tmp = (x) + (__d) / 2; \
	do_div(_tmp, __d); \
	_tmp; \
} \
)

/*
 * Multiplies an integer by a fraction (numer/denom), while avoiding
 * unnecessary overflow or loss of precision: the quotient and remainder
 * are scaled separately.
 */
#define mult_frac(x, numer, denom)( \
{ \
	typeof(x) quot = (x) / (denom); \
	typeof(x) rem = (x) % (denom); \
	(quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
130
131
/* Return address of the current function / address of the current instruction. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
/* Without large block devices sector_t fits a native division. */
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
148
149
150
151
152
153
154
155
156
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
164
165struct completion;
166struct pt_regs;
167struct user;
168
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
/* Voluntary preemption point: offer to reschedule. */
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 void ___might_sleep(const char *file, int line, int preempt_offset);
 void __might_sleep(const char *file, int line, int preempt_offset);

/**
 * might_sleep - annotation for functions that can sleep
 *
 * This macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...).
 *
 * This is a useful debugging help to be able to catch problems early and
 * not be bitten later when the calling function happens to sleep when it
 * is not supposed to.
 */
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 static inline void ___might_sleep(const char *file, int line,
				   int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
				   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
202
203
204
205
206
207
208
/*
 * abs() handles unsigned and signed longs, ints, shorts and chars.  For
 * all input types abs() returns a signed long.
 * abs() should not be used for 64-bit types (s64, u64, long long) - use
 * abs64() for those.
 */
#define abs(x) ({ \
		long ret; \
		if (sizeof(x) == sizeof(long)) { \
			long __x = (x); \
			ret = (__x < 0) ? -__x : __x; \
		} else { \
			int __x = (x); \
			ret = (__x < 0) ? -__x : __x; \
		} \
		ret; \
	})

/* 64-bit absolute value; returns s64. */
#define abs64(x) ({ \
		s64 __x = (x); \
		(__x < 0) ? -__x : __x; \
	})
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
241{
242 return (u32)(((u64) val * ep_ro) >> 32);
243}
244
#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
/* Annotate a point that may fault on a user-space access (and thus sleep). */
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
252
253extern struct atomic_notifier_head panic_notifier_list;
254extern long (*panic_blink)(int state);
255__printf(1, 2)
256void panic(const char *fmt, ...)
257 __noreturn __cold;
258extern void oops_enter(void);
259extern void oops_exit(void);
260void print_oops_end_marker(void);
261extern int oops_may_print(void);
262void do_exit(long error_code)
263 __noreturn;
264void complete_and_exit(struct completion *, long)
265 __noreturn;
266
267
268int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
269int __must_check _kstrtol(const char *s, unsigned int base, long *res);
270
271int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
272int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/**
 * kstrtoul - convert a string to an unsigned long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign, but not a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * We want to shortcut the function call, but only when unsigned long
	 * and unsigned long long have identical size and alignment, so the
	 * pointer cast below is safe.
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
/**
 * kstrtol - convert a string to a long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign or a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/*
	 * Shortcut the function call when long and long long have identical
	 * size and alignment, making the pointer cast below safe.
	 */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
331
332int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
333int __must_check kstrtoint(const char *s, unsigned int base, int *res);
334
/*
 * Fixed-width helpers: u64/s64 match (unsigned) long long and u32/s32
 * match (unsigned) int in width, so these simply delegate to the
 * corresponding native-type parsers.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
354
355int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
356int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
357int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
358int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
359
360int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
361int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
362int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
363int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
364int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
365int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
366int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
367int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
368int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
369int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
370
/*
 * Fixed-width *_from_user helpers: like the kstrtou64()/kstrtos32()
 * family above, these delegate to the matching-width parsers that copy
 * the string from user space first.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
390
391
392
393extern unsigned long simple_strtoul(const char *,char **,unsigned int);
394extern long simple_strtol(const char *,char **,unsigned int);
395extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
396extern long long simple_strtoll(const char *,char **,unsigned int);
397
398extern int num_to_str(char *buf, int size, unsigned long long num);
399
400
401
402extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
403extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
404extern __printf(3, 4)
405int snprintf(char *buf, size_t size, const char *fmt, ...);
406extern __printf(3, 0)
407int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
408extern __printf(3, 4)
409int scnprintf(char *buf, size_t size, const char *fmt, ...);
410extern __printf(3, 0)
411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
412extern __printf(2, 3)
413char *kasprintf(gfp_t gfp, const char *fmt, ...);
414extern __printf(2, 0)
415char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
416
417extern __scanf(2, 3)
418int sscanf(const char *, const char *, ...);
419extern __scanf(2, 0)
420int vsscanf(const char *, const char *, va_list);
421
422extern int get_option(char **str, int *pint);
423extern char *get_options(const char *str, int nints, int *ints);
424extern unsigned long long memparse(const char *ptr, char **retptr);
425extern bool parse_option_str(const char *str, const char *option);
426
427extern int core_kernel_text(unsigned long addr);
428extern int core_kernel_data(unsigned long addr);
429extern int __kernel_text_address(unsigned long addr);
430extern int kernel_text_address(unsigned long addr);
431extern int func_ptr_is_kernel_text(void *ptr);
432
433unsigned long int_sqrt(unsigned long);
434
435extern void bust_spinlocks(int yes);
436extern int oops_in_progress;
437extern int panic_timeout;
438extern int panic_on_oops;
439extern int panic_on_unrecovered_nmi;
440extern int panic_on_io_nmi;
441extern int panic_on_warn;
442extern int sysctl_panic_on_stackoverflow;
443
444extern bool crash_kexec_post_notifiers;
445
446
447
448
449
450static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
451{
452 if (panic_timeout == arch_default_timeout)
453 panic_timeout = timeout;
454}
455extern const char *print_tainted(void);
456enum lockdep_ok {
457 LOCKDEP_STILL_OK,
458 LOCKDEP_NOW_UNRELIABLE
459};
460extern void add_taint(unsigned flag, enum lockdep_ok);
461extern int test_taint(unsigned flag);
462extern unsigned long get_taint(void);
463extern int root_mountflags;
464
465extern bool early_boot_irqs_disabled;
466
467
/* Values used for system_state (global boot/shutdown phase). */
extern enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
} system_state;
475
476#define TAINT_PROPRIETARY_MODULE 0
477#define TAINT_FORCED_MODULE 1
478#define TAINT_CPU_OUT_OF_SPEC 2
479#define TAINT_FORCED_RMMOD 3
480#define TAINT_MACHINE_CHECK 4
481#define TAINT_BAD_PAGE 5
482#define TAINT_USER 6
483#define TAINT_DIE 7
484#define TAINT_OVERRIDDEN_ACPI_TABLE 8
485#define TAINT_WARN 9
486#define TAINT_CRAP 10
487#define TAINT_FIRMWARE_WORKAROUND 11
488#define TAINT_OOT_MODULE 12
489#define TAINT_UNSIGNED_MODULE 13
490#define TAINT_SOFTLOCKUP 14
491#define TAINT_LIVEPATCH 15
492
493extern const char hex_asc[];
494#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
495#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
496
497static inline char *hex_byte_pack(char *buf, u8 byte)
498{
499 *buf++ = hex_asc_hi(byte);
500 *buf++ = hex_asc_lo(byte);
501 return buf;
502}
503
504extern const char hex_asc_upper[];
505#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
506#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
507
508static inline char *hex_byte_pack_upper(char *buf, u8 byte)
509{
510 *buf++ = hex_asc_upper_hi(byte);
511 *buf++ = hex_asc_upper_lo(byte);
512 return buf;
513}
514
515extern int hex_to_bin(char ch);
516extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
517extern char *bin2hex(char *dst, const void *src, size_t count);
518
519bool mac_pton(const char *s, u8 *mac);
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
/* How much of the ftrace ring buffer to dump on an oops. */
enum ftrace_dump_mode {
	DUMP_NONE,
	DUMP_ALL,
	DUMP_ORIG,	/* only the CPU that triggered the dump */
};
546
547#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

/* Empty helper: exists only so gcc printf-checks trace_printk() formats. */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
/* Expanded under `if (0)`: format checking at compile time, zero runtime cost. */
#define __trace_printk_check_format(fmt, args...) \
do { \
	if (0) \
		____trace_printk_check_format(fmt, ##args); \
} while (0)
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
/**
 * trace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_printks scattered around in
 * your code.
 *
 * Optimization trick: __stringify((__VA_ARGS__)) becomes "()\0" (size 3)
 * when no arguments follow @fmt, and something larger otherwise.  With no
 * arguments there are no formats to process, so the cheaper trace_puts()
 * suffices; gcc optimizes the dead branch away.
 */
#define trace_printk(fmt, ...) \
do { \
	char _______STR[] = __stringify((__VA_ARGS__)); \
	if (sizeof(_______STR) > 3) \
		do_trace_printk(fmt, ##__VA_ARGS__); \
	else \
		trace_puts(fmt); \
} while (0)

/*
 * A compile-time-constant @fmt is placed in the __trace_printk_fmt section
 * and traced by address via the binary __trace_bprintk(); otherwise the
 * format must be copied and parsed at trace time by __trace_printk().
 */
#define do_trace_printk(fmt, args...) \
do { \
	static const char *trace_printk_fmt \
	__attribute__((section("__trace_printk_fmt"))) = \
	__builtin_constant_p(fmt) ? fmt : NULL; \
	\
	__trace_printk_check_format(fmt, ##args); \
	\
	if (__builtin_constant_p(fmt)) \
		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
	else \
		__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
619
620extern __printf(2, 3)
621int __trace_bprintk(unsigned long ip, const char *fmt, ...);
622
623extern __printf(2, 3)
624int __trace_printk(unsigned long ip, const char *fmt, ...);
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
/**
 * trace_puts - write a string into the ftrace buffer
 * @str: the string to record
 *
 * A compile-time-constant @str is recorded by address via __trace_bputs();
 * otherwise its contents are copied with __trace_puts().  Evaluates to the
 * return value of whichever helper was used.
 */
#define trace_puts(str) ({ \
	static const char *trace_printk_fmt \
	__attribute__((section("__trace_printk_fmt"))) = \
	__builtin_constant_p(str) ? str : NULL; \
	\
	if (__builtin_constant_p(str)) \
		__trace_bputs(_THIS_IP_, trace_printk_fmt); \
	else \
		__trace_puts(_THIS_IP_, str, strlen(str)); \
})
661extern int __trace_bputs(unsigned long ip, const char *str);
662extern int __trace_puts(unsigned long ip, const char *str, int size);
663
664extern void trace_dump_stack(int skip);
665
666
667
668
669
670
/**
 * ftrace_vprintk - va_list variant of trace_printk()
 * @fmt: the printf format for printing
 * @vargs: the va_list of arguments
 *
 * Constant formats take the binary __ftrace_vbprintk() fast path (format
 * recorded by address in the __trace_printk_fmt section); otherwise the
 * format string is processed at trace time by __ftrace_vprintk().
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
	if (__builtin_constant_p(fmt)) { \
		static const char *trace_printk_fmt \
		__attribute__((section("__trace_printk_fmt"))) = \
		__builtin_constant_p(fmt) ? fmt : NULL; \
		\
		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
	} else \
		__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
682
683extern __printf(2, 0) int
684__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
685
686extern __printf(2, 0) int
687__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
688
689extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
690#else
/* CONFIG_TRACING=n: every tracing facility compiles to a no-op stub. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
712#endif
713
714
715
716
717
718
/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 */
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({ \
	typeof(x) _max1 = (x); \
	typeof(y) _max2 = (y); \
	(void) (&_max1 == &_max2); \
	_max1 > _max2 ? _max1 : _max2; })

#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of lo/hi to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
#define min_t(type, x, y) ({ \
	type __min1 = (x); \
	type __min2 = (y); \
	__min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({ \
	type __max1 = (x); \
	type __max2 = (y); \
	__max1 > __max2 ? __max1: __max2; })

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * 'type' to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument 'val' is.  This is useful when val is an unsigned
 * type and min and max are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/*
 * swap - swap value of @a and @b
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
802
803
804
805
806
807
808
809
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})

/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif
818
819
/*
 * Permissions on a sysfs file: you didn't miss the 0 prefix did you?
 * Build-time checks: value in [0, 0777], user >= group >= other for
 * readability, user >= group for writability, and never world-writable.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
 (BUILD_BUG_ON_ZERO((perms) < 0) + \
  BUILD_BUG_ON_ZERO((perms) > 0777) + \
  \
  BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
  BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
  \
  BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
  \
  BUILD_BUG_ON_ZERO((perms) & 2) + \
  (perms))
831#endif
832