// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"
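/* Disables KFENCE on the first warning, assuming an irrecoverable error. */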
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
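/* The pool of pages used for guard pages and objects. */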
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
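/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */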
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates allocations, ensuring only one succeeds in a given sample interval. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the
	 * stored address falls within that page.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
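/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */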
static noinline void metadata_update_state(struct kfence_metadata *meta,
					   enum kfence_object_state next)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	/*
	 * Skip over 1 (this) function; noinline ensures that only this
	 * function is skipped from the saved stack trace.
	 */
	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);

	/*
	 * Pairs with READ_ONCE() in kfence_shutdown_cache() and
	 * kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}
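/* __always_inline this to ensure we won't do an indirect call to fn. */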
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * Iterate over the canary bytes on each side of the object until fn()
	 * returns false. The right side is still checked even if the left
	 * side already failed: when check_canary_byte() reports corruption,
	 * showing both sides gives more clues as to which bytes were
	 * corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}
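
/*
 * Allocate a guarded object: take a metadata slot off the freelist, place the
 * object at a random (left/right) end of its dedicated page, and set up the
 * canary bytes and required struct page fields.
 */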
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta)
		return NULL;

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep
		 * does report that there is a possibility of deadlock. Avoid
		 * it by using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, will always
	 * return zero. We still benefit from enabling KFENCE as early as
	 * possible, even when the RNG is not yet available, as this will
	 * allow KFENCE to detect bugs due to earlier allocations. The only
	 * downside is that the out-of-bounds accesses detected are
	 * deterministic for such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite
	 * KFENCE's canary bytes.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}
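
/*
 * Free a guarded object: check the canary bytes, optionally zero the object,
 * protect the page to catch use-after-frees, and return the slot to the
 * freelist (unless freeing a "zombie" object whose cache is being destroyed).
 */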
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}
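
/*
 * Initialize the pool: mark object pages PG_slab, protect all guard pages,
 * and put every metadata slot on the freelist. On failure, the unused
 * remainder of the memblock-allocated pool is released.
 */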
static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * them as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters the slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages: the second is the guard page to the left
	 * of the first object page, and the first merely serves as an
	 * extended guard at the edge of the pool.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Address of the object page. */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove it from the kmemleak object tree, as it would otherwise
	 * overlap with allocations returned by kfence_alloc(), which are
	 * registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and
	 * change page attributes due to risk of failing to do so as well. If
	 * changing page attributes for some pages fails, it is very likely
	 * that it also fails for the first page, and therefore expect
	 * addr==__kfence_pool in most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
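/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because
 * NULL is used to stop iteration.
 */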
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release, /* Pair with seq_open() to avoid leaking the seq_file. */
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif
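/*
 * Set up delayed work, which will enable and disable the static key. We need
 * to use a delayed work, since we cannot call static_branch_* from an atomic
 * context: toggling the static key and waiting for the next allocation may
 * sleep.
 */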
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair, we
		 * know that some concurrent code modified them; it is safe to
		 * skip such objects. The lockless reads pair with the
		 * WRITE_ONCE()s in metadata_update_state() and
		 * kfence_guarded_alloc(); the result is re-checked under
		 * meta->lock below.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations; free them as
			 * "zombie" objects: the slot is never returned to the
			 * freelist, so the page cannot be reused for a
			 * different-typed object, and the page protection
			 * still catches later accesses as use-after-frees.
			 */
			kfence_guarded_free((void *)meta->addr, meta, true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}
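
/*
 * Gated allocation entry point called from the slab allocator; returns NULL
 * (telling the caller to use the normal slab path) unless this allocation
 * wins the sample-interval gate.
 */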
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE)
		return NULL;

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
		return NULL;

	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
	 * cost, in case we have a large number of concurrent allocations.
	 */
	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * The lockless waitqueue_active() check is fine here: if the timer
	 * task is not yet queued, it re-checks kfence_allocation_gate after
	 * queuing and will not miss the update.
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	return kfence_guarded_alloc(s, size, flags);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this
	 * is either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this
	 * is either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the
	 * cache was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}
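
/*
 * Fault handler for protected KFENCE pages: classifies the access as
 * out-of-bounds (fault on a guard page), use-after-free (fault on a protected
 * object page), or invalid, reports it, and unprotects the page so the
 * faulting access can proceed.
 */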
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock, we can
		 * still report this as an OOB -- the report will simply show
		 * the stack trace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that
		 * a freed object may be reallocated. We simply report this as
		 * a use-after-free, with the stack trace showing the place
		 * where the object was reallocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}