1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H
3
4
5#include <stdarg.h>
6#include <linux/linkage.h>
7#include <linux/stddef.h>
8#include <linux/types.h>
9#include <linux/compiler.h>
10#include <linux/bitops.h>
11#include <linux/log2.h>
12#include <linux/typecheck.h>
13#include <linux/printk.h>
14#include <linux/dynamic_debug.h>
15#include <asm/byteorder.h>
16#include <uapi/linux/kernel.h>
17
/*
 * Minimum/maximum values of the kernel's basic integer types, built
 * from all-ones bit patterns so they are width-correct by construction.
 */
#define USHRT_MAX ((u16)(~0U))
#define SHRT_MAX ((s16)(USHRT_MAX>>1))
#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)

/* Limits of the fixed-width u8/s8 .. u64/s64 kernel types. */
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))

/* Magic value used as a stack canary — presumably checked elsewhere. */
#define STACK_MAGIC 0xdeadbeef

/* Replicate the low byte of @x into every byte of an unsigned long. */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* Alignment helpers; @a / (mask+1) must be a power of two. */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

/*
 * Number of elements in array @arr. __must_be_array() forces a build
 * error if a pointer (which would silently decay) is passed instead.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55
56
57
58
59
60
61
/* Low-bits mask for power-of-two rounding; @y must be a power of two. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
/* Round @x up / down to a multiple of @y (power of two only). */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/* sizeof() of a struct member, without needing an instance. */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* Integer ceiling division (definition shared with userspace via uapi). */
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
/*
 * 64-bit ceiling division via do_div(). Note @d is evaluated twice,
 * so avoid side effects in the arguments.
 */
#define DIV_ROUND_UP_ULL(ll,d) \
 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })

#if BITS_PER_LONG == 32
/* sector_t can be 64-bit on a 32-bit build, so use the ULL variant. */
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
#else
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif

/*
 * Round @x up to a multiple of @y; works for any non-zero @y, not just
 * powers of two. @y is evaluated once, @x may be evaluated twice.
 */
#define roundup(x, y) ( \
{ \
 const typeof(y) __y = y; \
 (((x) + (__y - 1)) / __y) * __y; \
} \
)
/* Round @x down to a multiple of @y; @x evaluated once, @y twice. */
#define rounddown(x, y) ( \
{ \
 typeof(x) __x = (x); \
 __x - (__x % (y)); \
} \
)
90
91
92
93
94
95
/*
 * DIV_ROUND_CLOSEST - divide and round to the nearest integer.
 *
 * Rounds half away from zero: the negative branch subtracts half the
 * divisor instead of adding it. The compile-time ((typeof(x))-1) > 0
 * tests detect unsigned operand types so the negative branch is only
 * taken when a genuinely negative value is possible.
 */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
 typeof(x) __x = x; \
 typeof(divisor) __d = divisor; \
 (((typeof(x))-1) > 0 || \
 ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
 (((__x) + ((__d) / 2)) / (__d)) : \
 (((__x) - ((__d) / 2)) / (__d)); \
} \
)

/*
 * Same as above for unsigned 64-bit dividends, using do_div().
 * The divisor is used as do_div()'s second argument, so it is
 * presumably limited to 32 bits — confirm against asm/div64.h.
 */
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
 typeof(divisor) __d = divisor; \
 unsigned long long _tmp = (x) + (__d) / 2; \
 do_div(_tmp, __d); \
 _tmp; \
} \
)

/*
 * mult_frac - multiply @x by the fraction @numer/@denom.
 * Splits @x into quotient and remainder first so the intermediate
 * products stay small, reducing overflow compared to (x*numer)/denom.
 */
#define mult_frac(x, numer, denom)( \
{ \
 typeof(x) quot = (x) / (denom); \
 typeof(x) rem = (x) % (denom); \
 (quot * (numer)) + ((rem * (numer)) / (denom)); \
} \
)
130
131
/* Return address of the current function / address of the current point. */
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })

#ifdef CONFIG_LBDAF
# include <asm/div64.h>
/* 64-bit sector_t: divide in place via do_div(), which yields the remainder. */
# define sector_div(a, b) do_div(a, b)
#else
/* Narrow sector_t: plain divide; note @n and @b are evaluated twice. */
# define sector_div(n, b)( \
{ \
 int _res; \
 _res = (n) % (b); \
 (n) /= (b); \
 _res; \
} \
)
#endif

/*
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32 bits (hence the double 16-bit shift).
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/*
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
164
165struct completion;
166struct pt_regs;
167struct user;
168
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
/* Voluntary preemption point: offers the scheduler a chance to run. */
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 void ___might_sleep(const char *file, int line, int preempt_offset);
 void __might_sleep(const char *file, int line, int preempt_offset);

/**
 * might_sleep - annotation for functions that can sleep
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP this calls __might_sleep(), which
 * presumably complains when executed in atomic context — confirm in
 * kernel/sched. It is also a voluntary preemption point via
 * might_resched(). Without the config option only the preemption
 * point remains.
 */
# define might_sleep() \
 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
/* Clear the recorded task-state change so a legitimate sleep isn't flagged. */
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
 static inline void ___might_sleep(const char *file, int line,
 int preempt_offset) { }
 static inline void __might_sleep(const char *file, int line,
 int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif

/* might_sleep() only when @cond is true. */
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
202
203
204
205
206
207
208
209
210
/**
 * abs - return absolute value of an argument
 * @x: the value. If it is of unsigned type it is converted to the signed
 *     type of the same width first. char is treated as signed regardless
 *     of its actual signedness, but the result type stays char.
 *
 * Return: the absolute value of @x, in @x's own (signed-equivalent) type.
 */
#define abs(x) __abs_choose_expr(x, long long, \
 __abs_choose_expr(x, long, \
 __abs_choose_expr(x, int, \
 __abs_choose_expr(x, short, \
 __abs_choose_expr(x, char, \
 __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), char), \
 (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
 ((void)0)))))))

/* One step of abs(): handle signed/unsigned @type, else defer to @other. */
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
 __builtin_types_compatible_p(typeof(x), signed type) || \
 __builtin_types_compatible_p(typeof(x), unsigned type), \
 ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
/**
 * reciprocal_scale - "scale" a value into the range [0, ep_ro)
 * @val: value to scale
 * @ep_ro: right-open interval endpoint
 *
 * Multiply @val by @ep_ro in 64-bit and keep the high 32 bits; the
 * result lands in [0, @ep_ro) without a division or modulo. Useful
 * e.g. for mapping a random 32-bit value to an array index. Note a
 * small @val maps to 0.
 *
 * Return: a value in the interval [0, @ep_ro).
 */
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	return (u32)(((u64) val * ep_ro) >> 32);
}
244
/*
 * might_fault() - annotation for code that can fault on user access.
 * Only active (via __might_fault()) on MMU kernels with lock/sleep
 * debugging enabled; otherwise it compiles to nothing.
 */
#if defined(CONFIG_MMU) && \
 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
252
253extern struct atomic_notifier_head panic_notifier_list;
254extern long (*panic_blink)(int state);
255__printf(1, 2)
256void panic(const char *fmt, ...)
257 __noreturn __cold;
258void nmi_panic(struct pt_regs *regs, const char *msg);
259extern void oops_enter(void);
260extern void oops_exit(void);
261void print_oops_end_marker(void);
262extern int oops_may_print(void);
263void do_exit(long error_code)
264 __noreturn;
265void complete_and_exit(struct completion *, long)
266 __noreturn;
267
268
269int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
270int __must_check _kstrtol(const char *s, unsigned int base, long *res);
271
272int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
273int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
/*
 * kstrtoul - convert a string to an unsigned long
 * @s: the string to parse
 * @base: number base to use (semantics as kstrtoull() — see lib/kstrtox.c)
 * @res: where to write the result on success
 *
 * Returns 0 on success; presumably a negative errno otherwise, per the
 * kstrto* convention — confirm against kstrtoull().
 */
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	/*
	 * We want to shortcut the call to kstrtoull() when the types have
	 * identical size and alignment, but
	 * __builtin_types_compatible_p(unsigned long, unsigned long long)
	 * is 0 even then, so check layout at compile time instead. The
	 * pointer cast is safe exactly because of that layout check.
	 */
	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
	    __alignof__(unsigned long) == __alignof__(unsigned long long))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
/*
 * kstrtol - convert a string to a long
 * @s: the string to parse
 * @base: number base to use (semantics as kstrtoll() — see lib/kstrtox.c)
 * @res: where to write the result on success
 *
 * Returns 0 on success; presumably a negative errno otherwise, per the
 * kstrto* convention — confirm against kstrtoll().
 */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
	/*
	 * Shortcut to kstrtoll() when long and long long have identical
	 * layout; __builtin_types_compatible_p() would reject the pair
	 * even then, hence the explicit size/alignment checks that also
	 * justify the pointer cast.
	 */
	if (sizeof(long) == sizeof(long long) &&
	    __alignof__(long) == __alignof__(long long))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
332
333int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
334int __must_check kstrtoint(const char *s, unsigned int base, int *res);
335
/*
 * Fixed-width parsers: u64/s64/u32/s32 have the same width as
 * unsigned long long / long long / unsigned int / int here, so each
 * simply forwards to the corresponding generic kstrto* function.
 */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}

static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}

static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}

static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
355
356int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
357int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
358int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
359int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
360int __must_check kstrtobool(const char *s, bool *res);
361
362int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
363int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
364int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
365int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
366int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
367int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
368int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
369int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
370int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
371int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
372int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
373
/*
 * Fixed-width *_from_user parsers: like the kstrtou64()..kstrtos32()
 * wrappers above, each forwards to the same-width generic variant that
 * reads the string from a userspace buffer of @count bytes.
 */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}

static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}

static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}

static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
393
394
395
396extern unsigned long simple_strtoul(const char *,char **,unsigned int);
397extern long simple_strtol(const char *,char **,unsigned int);
398extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
399extern long long simple_strtoll(const char *,char **,unsigned int);
400
401extern int num_to_str(char *buf, int size, unsigned long long num);
402
403
404
405extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
406extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
407extern __printf(3, 4)
408int snprintf(char *buf, size_t size, const char *fmt, ...);
409extern __printf(3, 0)
410int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
411extern __printf(3, 4)
412int scnprintf(char *buf, size_t size, const char *fmt, ...);
413extern __printf(3, 0)
414int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
415extern __printf(2, 3)
416char *kasprintf(gfp_t gfp, const char *fmt, ...);
417extern __printf(2, 0)
418char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
419extern __printf(2, 0)
420const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
421
422extern __scanf(2, 3)
423int sscanf(const char *, const char *, ...);
424extern __scanf(2, 0)
425int vsscanf(const char *, const char *, va_list);
426
427extern int get_option(char **str, int *pint);
428extern char *get_options(const char *str, int nints, int *ints);
429extern unsigned long long memparse(const char *ptr, char **retptr);
430extern bool parse_option_str(const char *str, const char *option);
431
432extern int core_kernel_text(unsigned long addr);
433extern int core_kernel_data(unsigned long addr);
434extern int __kernel_text_address(unsigned long addr);
435extern int kernel_text_address(unsigned long addr);
436extern int func_ptr_is_kernel_text(void *ptr);
437
438unsigned long int_sqrt(unsigned long);
439
440extern void bust_spinlocks(int yes);
441extern int oops_in_progress;
442extern int panic_timeout;
443extern int panic_on_oops;
444extern int panic_on_unrecovered_nmi;
445extern int panic_on_io_nmi;
446extern int panic_on_warn;
447extern int sysctl_panic_on_stackoverflow;
448
449extern bool crash_kexec_post_notifiers;
450
451
452
453
454
455
456extern atomic_t panic_cpu;
457#define PANIC_CPU_INVALID -1
458
459
460
461
462
463static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
464{
465 if (panic_timeout == arch_default_timeout)
466 panic_timeout = timeout;
467}
468extern const char *print_tainted(void);
469enum lockdep_ok {
470 LOCKDEP_STILL_OK,
471 LOCKDEP_NOW_UNRELIABLE
472};
473extern void add_taint(unsigned flag, enum lockdep_ok);
474extern int test_taint(unsigned flag);
475extern unsigned long get_taint(void);
476extern int root_mountflags;
477
478extern bool early_boot_irqs_disabled;
479
480
/* Coarse global boot/shutdown phase, readable via system_state. */
extern enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
} system_state;

/* Bit numbers of the kernel taint flags; see add_taint()/test_taint(). */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
#define TAINT_FORCED_RMMOD 3
#define TAINT_MACHINE_CHECK 4
#define TAINT_BAD_PAGE 5
#define TAINT_USER 6
#define TAINT_DIE 7
#define TAINT_OVERRIDDEN_ACPI_TABLE 8
#define TAINT_WARN 9
#define TAINT_CRAP 10
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
505
/* Lower-case hex digit table plus low/high nibble accessors. */
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
509
510static inline char *hex_byte_pack(char *buf, u8 byte)
511{
512 *buf++ = hex_asc_hi(byte);
513 *buf++ = hex_asc_lo(byte);
514 return buf;
515}
516
/* Upper-case hex digit table plus low/high nibble accessors. */
extern const char hex_asc_upper[];
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
520
521static inline char *hex_byte_pack_upper(char *buf, u8 byte)
522{
523 *buf++ = hex_asc_upper_hi(byte);
524 *buf++ = hex_asc_upper_lo(byte);
525 return buf;
526}
527
528extern int hex_to_bin(char ch);
529extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
530extern char *bin2hex(char *dst, const void *src, size_t count);
531
532bool mac_pton(const char *s, u8 *mac);
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
/* How much of the trace buffers ftrace_dump() should emit. */
enum ftrace_dump_mode {
	DUMP_NONE,
	DUMP_ALL,
	DUMP_ORIG,
};
559
560#ifdef CONFIG_TRACING
561void tracing_on(void);
562void tracing_off(void);
563int tracing_is_on(void);
564void tracing_snapshot(void);
565void tracing_snapshot_alloc(void);
566
567extern void tracing_start(void);
568extern void tracing_stop(void);
569
/*
 * Compile-time-only printf format checker for trace_printk(): the
 * __printf attribute makes the compiler validate @fmt against the
 * varargs, while the "if (0)" wrapper ensures the empty function is
 * never actually called at runtime.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
 if (0) \
 ____trace_printk_check_format(fmt, ##args); \
} while (0)
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
/**
 * trace_printk - printf formatting into the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Stringifying __VA_ARGS__ yields "()" (3 bytes with the NUL) when no
 * extra arguments were passed, so sizeof > 3 means real varargs exist
 * and the full do_trace_printk() path is needed; a bare string takes
 * the cheaper trace_puts() path instead.
 */
#define trace_printk(fmt, ...) \
do { \
 char _______STR[] = __stringify((__VA_ARGS__)); \
 if (sizeof(_______STR) > 3) \
 do_trace_printk(fmt, ##__VA_ARGS__); \
 else \
 trace_puts(fmt); \
} while (0)

/*
 * A compile-time-constant @fmt is registered in the special
 * __trace_printk_fmt section and the binary __trace_bprintk() fast
 * path is used; runtime-built formats fall back to __trace_printk().
 * Either way the format is type-checked at compile time.
 */
#define do_trace_printk(fmt, args...) \
do { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __trace_printk_check_format(fmt, ##args); \
 \
 if (__builtin_constant_p(fmt)) \
 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
 else \
 __trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
632
633extern __printf(2, 3)
634int __trace_bprintk(unsigned long ip, const char *fmt, ...);
635
636extern __printf(2, 3)
637int __trace_printk(unsigned long ip, const char *fmt, ...);
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
/**
 * trace_puts - write a plain string into the ftrace buffer
 * @str: the string to record
 *
 * Constant strings are registered in the __trace_printk_fmt section
 * and recorded via the binary __trace_bputs() fast path; runtime
 * strings go through __trace_puts() with an explicit strlen().
 */
#define trace_puts(str) ({ \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(str) ? str : NULL; \
 \
 if (__builtin_constant_p(str)) \
 __trace_bputs(_THIS_IP_, trace_printk_fmt); \
 else \
 __trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
676
677extern void trace_dump_stack(int skip);
678
679
680
681
682
683
/*
 * va_list counterpart of trace_printk(): a compile-time-constant @fmt
 * is registered in the __trace_printk_fmt section and recorded via the
 * binary __ftrace_vbprintk() fast path; otherwise the plain
 * __ftrace_vprintk() is used.
 */
#define ftrace_vprintk(fmt, vargs) \
do { \
 if (__builtin_constant_p(fmt)) { \
 static const char *trace_printk_fmt __used \
 __attribute__((section("__trace_printk_fmt"))) = \
 __builtin_constant_p(fmt) ? fmt : NULL; \
 \
 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
 } else \
 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)

extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
701
702extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
703#else
/*
 * CONFIG_TRACING=n: stub out the tracing API so callers still compile.
 * All functions are no-ops; trace_printk()/ftrace_vprintk() return 0.
 */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
725#endif
726
727
728
729
730
731
/*
 * min()/max() with strict type checking: the "unnecessary" pointer
 * comparison (&_min1 == &_min2) makes the compiler warn when @x and
 * @y have different types. Each argument is evaluated exactly once.
 */
#define min(x, y) ({ \
 typeof(x) _min1 = (x); \
 typeof(y) _min2 = (y); \
 (void) (&_min1 == &_min2); \
 _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({ \
 typeof(x) _max1 = (x); \
 typeof(y) _max2 = (y); \
 (void) (&_max1 == &_max2); \
 _max1 > _max2 ? _max1 : _max2; })

/* Three-way variants; the inner result is cast to typeof(x) first. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({ \
 typeof(x) __x = (x); \
 typeof(y) __y = (y); \
 __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * Uses min()/max(), so @lo and @hi get the same strict type checks
 * against @val as plain min()/max() would apply.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
767
768
769
770
771
772
773
/*
 * min_t()/max_t(): like min()/max() but both operands are converted to
 * an explicit @type, for callers that cannot satisfy the strict type
 * checks of the plain macros.
 */
#define min_t(type, x, y) ({ \
 type __min1 = (x); \
 type __min2 = (y); \
 __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({ \
 type __max1 = (x); \
 type __max2 = (y); \
 __max1 > __max2 ? __max1: __max2; })

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * Does no typechecking; all comparisons are performed in temporaries
 * of @type.
 */
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * Does no typechecking; comparisons use typeof(@val). Useful when @val
 * is unsigned and @lo/@hi are literals that would otherwise be signed.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)

/*
 * swap - exchange the values of @a and @b.
 * Both must be assignable lvalues of compatible type.
 */
#define swap(a, b) \
 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
815
816
817
818
819
820
821
822
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:	the pointer to the member.
 * @type:	the type of the container struct this is embedded in.
 * @member:	the name of the member within the struct.
 *
 * The typed __mptr temporary makes the compiler verify that @ptr really
 * points to @member's type before the offset arithmetic.
 */
#define container_of(ptr, type, member) ({ \
 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
 (type *)( (char *)__mptr - offsetof(type,member) );})

/* Marker: mcount record support requires a rebuild when toggled. */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

/*
 * Compile-time sanity checks on an octal permission value, evaluating
 * to @perms when all checks pass (each BUILD_BUG_ON_ZERO is 0):
 * must be within 0..0777, readability must not increase from user to
 * group to other, group must not be more writable than user, and
 * other-writable is rejected outright.
 */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
 (BUILD_BUG_ON_ZERO((perms) < 0) + \
 BUILD_BUG_ON_ZERO((perms) > 0777) + \
 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
 /* USER_WRITABLE >= GROUP_WRITABLE */ \
 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
 /* OTHER_WRITABLE? Generally a bad idea. */ \
 BUILD_BUG_ON_ZERO((perms) & 2) + \
 (perms))
844#endif
845