1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H
3
4
5#include <stdarg.h>
6#include <linux/linkage.h>
7#include <linux/stddef.h>
8#include <linux/types.h>
9#include <linux/compiler.h>
10#include <linux/bitops.h>
11#include <linux/log2.h>
12#include <linux/typecheck.h>
13#include <linux/printk.h>
14#include <asm/byteorder.h>
15#include <uapi/linux/kernel.h>
16
/*
 * Minimum/maximum values of the standard C integer types.  Kernel code
 * cannot include userspace <limits.h>, so the constants are defined
 * here in terms of the kernel's own typedefs.
 */
#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

/* Limits of the kernel's fixed-width u8/s8 ... u64/s64 types. */
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))
43
/* Magic canary value used to detect kernel stack overruns. */
#define STACK_MAGIC 0xdeadbeef

/* Replicate the low byte of @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/*
 * Power-of-two alignment helpers.  The underlying __ALIGN_KERNEL*()
 * macros come from <uapi/linux/kernel.h>; @a must be a power of two.
 */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

/*
 * Number of elements in a true array.  __must_be_array() forces a
 * build error when @arr is actually a pointer.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

/*
 * Convert a u64 to a user-space pointer.  typecheck() rejects any
 * argument that is not exactly u64; the intermediate uintptr_t cast
 * avoids a direct integer-to-pointer truncation warning.
 */
#define u64_to_user_ptr(x) ( \
{ \
 typecheck(u64, x); \
 (void __user *)(uintptr_t)x; \
} \
)
61
62
63
64
65
66
67
/* Mask for rounding to a power-of-two @y, in the type of @x. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/* Round @x up/down to a multiple of @y; @y must be a power of two. */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof() of a struct member without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Integer division, rounding the quotient up. */
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
/*
 * DIV_ROUND_UP for a 64-bit dividend; uses do_div() so it also works
 * on 32-bit architectures without 64-bit division support.
 */
#define DIV_ROUND_UP_ULL(ll,d) \
 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

/* sector_t is 64-bit on 32-bit kernels with CONFIG_LBDAF; pick a safe divide. */
#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif

/* Round @x up to a multiple of @y; any @y, @y evaluated only once. */
#define roundup(x, y) ( \
{ \
 const typeof(y) __y = y; \
 (((x) + (__y - 1)) / __y) * __y; \
} \
)
/* Round @x down to a multiple of @y; any @y, @x evaluated only once. */
#define rounddown(x, y) ( \
{ \
 typeof(x) __x = (x); \
 __x - (__x % (y)); \
} \
)
96
97
98
99
100
101
/*
 * Divide @x by @divisor, rounding to the nearest integer.  The sign
 * tests fold away at compile time: for unsigned types, or a positive
 * signed dividend, half the divisor is added before dividing; for a
 * negative signed dividend it is subtracted so rounding stays symmetric
 * around zero.  Behaviour with a negative divisor is not defined here.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
 typeof(x) __x = x; \
 typeof(divisor) __d = divisor; \
 (((typeof(x))-1) > 0 || \
 ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
 (((__x) + ((__d) / 2)) / (__d)) : \
 (((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * DIV_ROUND_CLOSEST for a 64-bit unsigned dividend; uses do_div() so
 * it works on 32-bit architectures as well.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
 typeof(divisor) __d = divisor; \
 unsigned long long _tmp = (x) + (__d) / 2; \
 do_div(_tmp, __d); \
 _tmp; \
} \
)

/*
 * Compute (x * numer) / denom without the intermediate overflow that a
 * naive x*numer would risk: split @x into quotient and remainder with
 * respect to @denom and combine the scaled parts.
 */
#define mult_frac(x, numer, denom)( \
{ \
 typeof(x) quot = (x) / (denom); \
 typeof(x) rem = (x) % (denom); \
 (quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
136
137
/* Address the current function will return to. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
/* Address of the current instruction, taken via a local label. */
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

/*
 * sector_div(n, b): divide sector count @n by @b in place and yield the
 * remainder.  With CONFIG_LBDAF sector_t is 64-bit, so do_div() must be
 * used; otherwise plain C division is sufficient.
 */
#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
 int _res; \
 _res = (n) % (b); \
 (n) /= (b); \
 _res; \
} \
)
#endif
154
155
156
157
158
159
160
161
162
/*
 * upper_32_bits - return bits 32..63 of a number.
 * The shift is split into two 16-bit shifts so the expression is valid
 * (shift count < type width) even when @n is only 32 bits wide.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/* lower_32_bits - return bits 0..31 of a number. */
#define lower_32_bits(n) ((u32)(n))
170
/* Opaque forward declarations used by the prototypes below. */
struct completion;
struct pt_regs;
struct user;

/* Voluntary preemption point: reschedule if needed, otherwise a no-op. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 void ___might_sleep(const char *file, int line, int preempt_offset);
 void __might_sleep(const char *file, int line, int preempt_offset);
/*
 * might_sleep - annotate a function that may sleep.
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP this prints a stack trace if executed
 * in an atomic context (spinlock held, irqs disabled, ...); it is also
 * a voluntary preemption point via might_resched().
 *
 * sched_annotate_sleep() suppresses the "do not call blocking ops when
 * !TASK_RUNNING" warning for a sleep the caller knows is safe, by
 * clearing the recorded task-state change.
 */
# define might_sleep() \
 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 static inline void ___might_sleep(const char *file, int line,
 int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
 int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* Conditional form of might_sleep(); @cond is evaluated exactly once. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
208
209
210
211
212
213
214
215
216
/*
 * abs - return the absolute value of @x, in @x's own signed type.
 *
 * Dispatches at compile time over long long, long, int, short and
 * char via __abs_choose_expr().  Plain "char" gets its own case
 * because its signedness is implementation-defined and it matches
 * neither "signed char" nor "unsigned char" for type compatibility.
 * Any other type yields ((void)0), which makes using the result a
 * compile error.
 */
#define abs(x) __abs_choose_expr(x, long long, \
 __abs_choose_expr(x, long, \
 __abs_choose_expr(x, int, \
 __abs_choose_expr(x, short, \
 __abs_choose_expr(x, char, \
 __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), char), \
 (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
 ((void)0)))))))

/* One dispatch step: handle signed/unsigned @type, else defer to @other. */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), signed type) || \
 __builtin_types_compatible_p(typeof(x), unsigned type), \
 ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
247{
248 return (u32)(((u64) val * ep_ro) >> 32);
249}
250
/* Annotate code that may fault (and thus sleep) on user memory access. */
#if defined(CONFIG_MMU) && \
 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif

/* Panic/oops machinery (implemented in kernel/panic.c and arch code). */
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...)
 __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code)
 __noreturn;
void complete_and_exit(struct completion *, long)
 __noreturn;

/*
 * Strict string-to-integer parsers.  The leading-underscore variants
 * are internal fallbacks; callers should use kstrtoul()/kstrtol()
 * defined below, or the fixed-width wrappers.
 */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);

int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
298{
299
300
301
302
303 if (sizeof(unsigned long) == sizeof(unsigned long long) &&
304 __alignof__(unsigned long) == __alignof__(unsigned long long))
305 return kstrtoull(s, base, (unsigned long long *)res);
306 else
307 return _kstrtoul(s, base, res);
308}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
327{
328
329
330
331
332 if (sizeof(long) == sizeof(long long) &&
333 __alignof__(long) == __alignof__(long long))
334 return kstrtoll(s, base, (long long *)res);
335 else
336 return _kstrtol(s, base, res);
337}
338
int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __must_check kstrtoint(const char *s, unsigned int base, int *res);

/*
 * Fixed-width wrappers.  u64/s64 are always long-long sized and
 * u32/s32 are always int sized, so these forward directly.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}

int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
int __must_check kstrtobool(const char *s, bool *res);

/*
 * _from_user variants: copy up to @count bytes from user space first,
 * then parse like the corresponding kstrto*() function.
 */
int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);

/* Fixed-width _from_user wrappers, mirroring the kstrtou64() family. */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
399
400
401
/*
 * Legacy string-to-number parsers.  These do not detect overflow or
 * trailing garbage; new code should use the kstrto*() family above.
 */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);

/* Format @num as decimal into @buf (at most @size bytes). */
extern int num_to_str(char *buf, int size, unsigned long long num);

/*
 * Kernel printf/scanf family.  The __printf()/__scanf() attributes
 * make the compiler check format strings against arguments.
 */
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
int snprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(3, 4)
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3) __malloc
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __printf(2, 0) __malloc
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);

extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);

/* Kernel command-line option parsing helpers. */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);

/* Tests whether an address lies inside kernel text. */
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);

unsigned long int_sqrt(unsigned long);

/* Panic/oops tunables, settable via sysctl or the kernel command line. */
extern void bust_spinlocks(int yes);
extern int oops_in_progress;
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;

extern bool crash_kexec_post_notifiers;

/*
 * panic_cpu holds the CPU currently executing panic(), or
 * PANIC_CPU_INVALID when no panic is in progress.
 */
extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1
465
466
467
468
469
470static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
471{
472 if (panic_timeout == arch_default_timeout)
473 panic_timeout = timeout;
474}
/* Human-readable taint summary, e.g. for oops reports. */
extern const char *print_tainted(void);
/* Whether lockdep can still be trusted after adding a taint. */
enum lockdep_ok {
 LOCKDEP_STILL_OK,
 LOCKDEP_NOW_UNRELIABLE
};
/* Set/query taint flags (TAINT_* bit numbers below). */
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;

extern bool early_boot_irqs_disabled;

/* Coarse global boot/shutdown state machine. */
extern enum system_states {
 SYSTEM_BOOTING,
 SYSTEM_RUNNING,
 SYSTEM_HALT,
 SYSTEM_POWER_OFF,
 SYSTEM_RESTART,
} system_state;

/* Taint flag bit numbers, as passed to add_taint()/test_taint(). */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15

/* Lower-case hex digit table and nibble lookups. */
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
516
517static inline char *hex_byte_pack(char *buf, u8 byte)
518{
519 *buf++ = hex_asc_hi(byte);
520 *buf++ = hex_asc_lo(byte);
521 return buf;
522}
523
/* Upper-case hex digit table and nibble lookups. */
extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
527
528static inline char *hex_byte_pack_upper(char *buf, u8 byte)
529{
530 *buf++ = hex_asc_upper_hi(byte);
531 *buf++ = hex_asc_upper_lo(byte);
532 return buf;
533}
534
/* Hex-string <-> binary conversion helpers. */
extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);

/* Parse a "xx:xx:xx:xx:xx:xx" MAC address string into @mac. */
bool mac_pton(const char *s, u8 *mac);
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
/* How much of the trace buffers ftrace_dump() should dump. */
enum ftrace_dump_mode {
 DUMP_NONE,
 DUMP_ALL,
 DUMP_ORIG,
};

#ifdef CONFIG_TRACING
/* Runtime on/off and snapshot control for the tracing ring buffer. */
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

/*
 * Empty printf-like function whose only purpose is to let the
 * compiler type-check trace_printk() format strings; the "if (0)"
 * wrapper below guarantees no call is ever emitted.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
 if (0) \
 ____trace_printk_check_format(fmt, ##args); \
} while (0)

/*
 * trace_printk - printf into the trace ring buffer (debugging only).
 *
 * With no variadic arguments the stringified argument list is "()"
 * (three bytes including the NUL), so the cheaper trace_puts() path
 * is taken; otherwise do_trace_printk() handles formatting.
 */
#define trace_printk(fmt, ...) \
do { \
 char _______STR[] = __stringify((__VA_ARGS__)); \
 if (sizeof(_______STR) > 3) \
 do_trace_printk(fmt, ##__VA_ARGS__); \
 else \
 trace_puts(fmt); \
} while (0)

/*
 * A constant format string is placed in the __trace_printk_fmt
 * section so the fast binary __trace_bprintk() can be used; runtime
 * format strings fall back to __trace_printk().
 */
#define do_trace_printk(fmt, args...) \
do { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __trace_printk_check_format(fmt, ##args); \
 \
 if (__builtin_constant_p(fmt)) \
 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
 else \
 __trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)

extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);

/*
 * trace_puts - write a plain string into the trace buffer.
 * Same constant/runtime split as do_trace_printk(): constant strings
 * go through the binary __trace_bputs() fast path.
 */
#define trace_puts(str) ({ \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(str) ? str : NULL; \
 \
 if (__builtin_constant_p(str)) \
 __trace_bputs(_THIS_IP_, trace_printk_fmt); \
 else \
 __trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

/* Dump the current stack trace into the trace buffer, skipping @skip frames. */
extern void trace_dump_stack(int skip);

/*
 * ftrace_vprintk - va_list flavour of trace_printk(), with the same
 * constant-format fast path.
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
 if (__builtin_constant_p(fmt)) { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
 } else \
 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)

extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

/* Dump the trace buffers to the console (e.g. on oops). */
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
/* !CONFIG_TRACING: every tracing helper compiles away to a no-op. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

/* Keeps format-string checking even when tracing is compiled out. */
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
 return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
 return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif
733
734
735
736
737
738
/*
 * min()/max() with strict type checking: comparing the addresses of
 * the two temporaries makes the compiler warn when @x and @y have
 * different types.  Each argument is evaluated exactly once.
 */
#define min(x, y) ({ \
 typeof(x) _min1 = (x); \
 typeof(y) _min2 = (y); \
 (void) (&_min1 == &_min2); \
 _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({ \
 typeof(x) _max1 = (x); \
 typeof(y) _max2 = (y); \
 (void) (&_max1 == &_max2); \
 _max1 > _max2 ? _max1 : _max2; })

/* Three-way variants; the inner result is cast to typeof(x). */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/*
 * min_not_zero - return the smaller of two values, ignoring zeroes.
 * If one value is zero the other is returned (even if also zero).
 */
#define min_not_zero(x, y) ({ \
 typeof(x) __x = (x); \
 typeof(y) __y = (y); \
 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
763
764
765
766
767
768
769
770
771
772
/*
 * clamp - restrict @val to the range [@lo, @hi].
 * Uses the type-checked min()/max(), so all three arguments should
 * have the same type (or be cast accordingly).
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

/*
 * min_t()/max_t() - as min()/max(), but both arguments are first
 * converted to an explicit @type, sidestepping the type check.
 */
#define min_t(type, x, y) ({ \
 type __min1 = (x); \
 type __min2 = (y); \
 __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({ \
 type __max1 = (x); \
 type __max2 = (y); \
 __max1 > __max2 ? __max1: __max2; })

/* clamp_t - clamp with all values converted to an explicit @type. */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/*
 * clamp_val - clamp using @val's own type for all three operands;
 * useful when @lo/@hi are literals of a different type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/*
 * swap - exchange the values of @a and @b.
 * Both must be assignable lvalues of compatible type.
 */
#define swap(a, b) \
 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
822
823
824
825
826
827
828
829
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * The typed __mptr temporary makes the compiler verify that @ptr
 * really points to a @member of @type.
 */
#define container_of(ptr, type, member) ({ \
 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
 (type *)( (char *)__mptr - offsetof(type,member) );})

/* Rebuild everything when the mcount-record option is toggled. */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

/*
 * Compile-time sanity checks for octal permission arguments:
 * value in [0, 0777], read bits non-increasing from user to group to
 * other, group not more writable than user, and "other" never
 * writable.  Evaluates to @perms when all checks pass.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
 (BUILD_BUG_ON_ZERO((perms) < 0) + \
 BUILD_BUG_ON_ZERO((perms) > 0777) + \
 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
 /* USER_WRITABLE >= GROUP_WRITABLE */ \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
 /* OTHER_WRITABLE?  Generally considered a bad idea. */ \
 BUILD_BUG_ON_ZERO((perms) & 2) + \
 (perms))
#endif /* _LINUX_KERNEL_H */