#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H


#include <stdarg.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>

#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))

#define STACK_MAGIC 0xdeadbeef

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

#define u64_to_user_ptr(x) ( \
{ \
        typecheck(u64, x); \
        (void __user *)(uintptr_t)x; \
} \
)

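/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */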
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
        ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif

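/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */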
#define roundup(x, y) ( \
{ \
        const typeof(y) __y = y; \
        (((x) + (__y - 1)) / __y) * __y; \
} \
)
#define rounddown(x, y) ( \
{ \
        typeof(x) __x = (x); \
        __x - (__x % (y)); \
} \
)

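/*
 * Divide positive or negative dividend by positive divisor and round
 * to closest integer. Result is undefined for negative divisors and
 * for negative dividends if the divisor variable type is unsigned.
 */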
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
        typeof(x) __x = x; \
        typeof(divisor) __d = divisor; \
        (((typeof(x))-1) > 0 || \
         ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
                (((__x) + ((__d) / 2)) / (__d)) : \
                (((__x) - ((__d) / 2)) / (__d)); \
} \
)

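/*
 * Same as above but for u64 dividends. divisor must be a 32-bit
 * number.
 */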
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
        typeof(divisor) __d = divisor; \
        unsigned long long _tmp = (x) + (__d) / 2; \
        do_div(_tmp, __d); \
        _tmp; \
} \
)

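/*
 * Multiplies an integer by a fraction, while avoiding unnecessary
 * overflow or loss of precision.
 */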
#define mult_frac(x, numer, denom)( \
{ \
        typeof(x) quot = (x) / (denom); \
        typeof(x) rem = (x) % (denom); \
        (quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)

#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
        int _res; \
        _res = (n) % (b); \
        (n) /= (b); \
        _res; \
} \
)
#endif

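/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */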
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

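/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */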
#define lower_32_bits(n) ((u32)(n))

struct completion;
struct pt_regs;
struct user;

#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  void ___might_sleep(const char *file, int line, int preempt_offset);
  void __might_sleep(const char *file, int line, int preempt_offset);

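/**
 * might_sleep - annotation for functions that can sleep
 *
 * this macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...).
 *
 * This is a useful debugging help to be able to catch problems early and not
 * be bitten later when the calling function happens to sleep when it is not
 * supposed to.
 */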
# define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
  static inline void ___might_sleep(const char *file, int line,
                                    int preempt_offset) { }
  static inline void __might_sleep(const char *file, int line,
                                   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)

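/**
 * abs - return absolute value of an argument
 * @x: the value.  If it is unsigned type, it is converted to signed type first.
 *     char is treated as if it was signed (regardless of whether it really is)
 *     but the macro's return type is preserved as char.
 *
 * Return: an absolute value of x.
 */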
#define abs(x) __abs_choose_expr(x, long long, \
                __abs_choose_expr(x, long, \
                __abs_choose_expr(x, int, \
                __abs_choose_expr(x, short, \
                __abs_choose_expr(x, char, \
                __builtin_choose_expr( \
                        __builtin_types_compatible_p(typeof(x), char), \
                        (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
                        ((void)0)))))))

#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
        __builtin_types_compatible_p(typeof(x), signed type) || \
        __builtin_types_compatible_p(typeof(x), unsigned type), \
        ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)

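/**
 * reciprocal_scale - "scale" a value into range [0, ep_ro)
 * @val: value
 * @ep_ro: right open interval endpoint
 *
 * Perform a "reciprocal multiplication" in order to "scale" a value into
 * range [0, ep_ro), where the upper interval endpoint is right-open.
 * This is useful, e.g. for accessing an index of an array containing
 * ep_ro elements. Think of it as sort of modulus, only that the result
 * isn't that of modulo. Note that if the initial input is a small value,
 * the result will be 0.
 *
 * Return: a result based on @val in interval [0, ep_ro).
 */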
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
        return (u32)(((u64) val * ep_ro) >> 32);
}

#if defined(CONFIG_MMU) && \
        (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif

extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...)
        __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code)
        __noreturn;
void complete_and_exit(struct completion *, long)
        __noreturn;

/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);

int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __must_check kstrtoll(const char *s, unsigned int base, long long *res);

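/**
 * kstrtoul - convert a string to an unsigned long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign, but not a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoul. Return code must
 * be checked.
 */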
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
        /*
         * We want to shortcut function call, but
         * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
         */
        if (sizeof(unsigned long) == sizeof(unsigned long long) &&
            __alignof__(unsigned long) == __alignof__(unsigned long long))
                return kstrtoull(s, base, (unsigned long long *)res);
        else
                return _kstrtoul(s, base, res);
}

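/**
 * kstrtol - convert a string to a long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign or a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtol. Return code must
 * be checked.
 */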
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
        /*
         * We want to shortcut function call, but
         * __builtin_types_compatible_p(long, long long) = 0.
         */
        if (sizeof(long) == sizeof(long long) &&
            __alignof__(long) == __alignof__(long long))
                return kstrtoll(s, base, (long long *)res);
        else
                return _kstrtol(s, base, res);
}

int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __must_check kstrtoint(const char *s, unsigned int base, int *res);

static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
        return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
        return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
        return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
        return kstrtoint(s, base, res);
}

int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
int __must_check kstrtobool(const char *s, bool *res);

int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);

static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
        return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
        return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
        return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
        return kstrtoint_from_user(s, count, base, res);
}

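/* Obsolete, do not use.  Use kstrto<foo> instead */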
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);

extern int num_to_str(char *buf, int size, unsigned long long num);

extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
int snprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(3, 4)
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3) __malloc
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __printf(2, 0) __malloc
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);

extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);

extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);

extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);

unsigned long int_sqrt(unsigned long);

extern void bust_spinlocks(int yes);
extern int oops_in_progress;
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_stackoverflow;

extern bool crash_kexec_post_notifiers;

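/*
 * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
 * holds a CPU number which is executing panic() currently. A value of
 * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
 */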
extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1

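/*
 * Only to be used by arch init code. If the user over-wrote the default
 * CONFIG_PANIC_TIMEOUT, honor it.
 */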
static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
        if (panic_timeout == arch_default_timeout)
                panic_timeout = timeout;
}
extern const char *print_tainted(void);
enum lockdep_ok {
        LOCKDEP_STILL_OK,
        LOCKDEP_NOW_UNRELIABLE
};
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;

extern bool early_boot_irqs_disabled;

extern enum system_states {
        SYSTEM_BOOTING,
        SYSTEM_RUNNING,
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
} system_state;

#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15

extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]

static inline char *hex_byte_pack(char *buf, u8 byte)
{
        *buf++ = hex_asc_hi(byte);
        *buf++ = hex_asc_lo(byte);
        return buf;
}

extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]

static inline char *hex_byte_pack_upper(char *buf, u8 byte)
{
        *buf++ = hex_asc_upper_hi(byte);
        *buf++ = hex_asc_upper_lo(byte);
        return buf;
}

extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);

bool mac_pton(const char *s, u8 *mac);

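/*
 * General tracing related utility functions - trace_printk(),
 * tracing_on/tracing_off and tracing_start()/tracing_stop
 *
 * Use tracing_on/tracing_off when you want to quickly turn on or off
 * tracing. It simply enables or disables the recording of the trace events.
 * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
 * file, which gives a means for the kernel and userspace to interact.
 * Place a tracing_off() in the kernel where you want tracing to end.
 * From user space, examine the trace, and then echo 1 > tracing_on
 * to continue tracing.
 *
 * tracing_stop/tracing_start has slightly more effect than tracing_on/off.
 * It is used by the mechanisms that prevent tracing while writing to the ring
 * buffer, such as a stack trace being dumped while reading the trace.
 */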
enum ftrace_dump_mode {
        DUMP_NONE,
        DUMP_ALL,
        DUMP_ORIG,
};

#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
        if (0) \
                ____trace_printk_check_format(fmt, ##args); \
} while (0)

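/**
 * trace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __trace_printk is an internal function for trace_printk() and
 *       the @ip is passed in via the trace_printk() macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering in various
 * printk like tracing in the code, a developer can quickly see
 * where problems are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_printks scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_printk() is used.)
 *
 * A little optimization trick is done here. If there's only one
 * argument, there's no need to scan the string for printf formats;
 * trace_puts() will suffice. __stringify((__VA_ARGS__)) turns into
 * "()\0" with a size of 3 when there are no args, and anything else
 * is bigger, so comparing the stringified size against 3 tells us
 * whether to use do_trace_printk() or the cheaper trace_puts().
 */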
#define trace_printk(fmt, ...) \
do { \
        char _______STR[] = __stringify((__VA_ARGS__)); \
        if (sizeof(_______STR) > 3) \
                do_trace_printk(fmt, ##__VA_ARGS__); \
        else \
                trace_puts(fmt); \
} while (0)

#define do_trace_printk(fmt, args...) \
do { \
        static const char *trace_printk_fmt __used \
                __attribute__((section("__trace_printk_fmt"))) = \
                __builtin_constant_p(fmt) ? fmt : NULL; \
 \
        __trace_printk_check_format(fmt, ##args); \
 \
        if (__builtin_constant_p(fmt)) \
                __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
        else \
                __trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)

extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);

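/**
 * trace_puts - write a string into the ftrace buffer
 * @str: the string to record
 *
 * Note: __trace_bputs is an internal function for trace_puts and
 *       the @ip is passed in via the trace_puts macro.
 *
 * This is similar to trace_printk() but is made for those really fast
 * paths that a developer wants the least amount of "Heisenbug" effects,
 * where the processing of the print format is still too much.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_puts scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_puts() is used.)
 *
 * Returns: 0 if nothing was written, positive # if the string was.
 *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
 */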
#define trace_puts(str) ({ \
        static const char *trace_printk_fmt __used \
                __attribute__((section("__trace_printk_fmt"))) = \
                __builtin_constant_p(str) ? str : NULL; \
 \
        if (__builtin_constant_p(str)) \
                __trace_bputs(_THIS_IP_, trace_printk_fmt); \
        else \
                __trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

extern void trace_dump_stack(int skip);

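/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement.
 */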
#define ftrace_vprintk(fmt, vargs) \
do { \
        if (__builtin_constant_p(fmt)) { \
                static const char *trace_printk_fmt __used \
                        __attribute__((section("__trace_printk_fmt"))) = \
                        __builtin_constant_p(fmt) ? fmt : NULL; \
 \
                __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
        } else \
                __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)

extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
        return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
        return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif

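/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 */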
#define min(x, y) ({ \
        typeof(x) _min1 = (x); \
        typeof(y) _min2 = (y); \
        (void) (&_min1 == &_min2); \
        _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({ \
        typeof(x) _max1 = (x); \
        typeof(y) _max2 = (y); \
        (void) (&_max1 == &_max2); \
        _max1 > _max2 ? _max1 : _max2; })

#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)

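/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */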
#define min_not_zero(x, y) ({ \
        typeof(x) __x = (x); \
        typeof(y) __y = (y); \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

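/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of lo/hi to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */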
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

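/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */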
#define min_t(type, x, y) ({ \
        type __min1 = (x); \
        type __min2 = (y); \
        __min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({ \
        type __max1 = (x); \
        type __max2 = (y); \
        __max1 > __max2 ? __max1 : __max2; })

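/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * 'type' to make all the comparisons.
 */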
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

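/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument 'val' is.  This is useful when val is an unsigned
 * type and lo and hi are literals that would otherwise be assigned a signed
 * integer type.
 */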
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

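/*
 * swap - swap value of @a and @b
 */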
#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

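/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 */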
#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})

/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

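/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */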
#define VERIFY_OCTAL_PERMISSIONS(perms) \
        (BUILD_BUG_ON_ZERO((perms) < 0) + \
         BUILD_BUG_ON_ZERO((perms) > 0777) + \
         /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
         BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
         BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
         /* USER_WRITABLE >= GROUP_WRITABLE */ \
         BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
         /* OTHER_WRITABLE?  Generally considered a bad idea. */ \
         BUILD_BUG_ON_ZERO((perms) & 2) + \
         (perms))
#endif