// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assertion, but continues kernel execution. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

/* The sample interval in milliseconds; 0 means KFENCE is disabled. */
static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
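
/*
 * With MODULE_PARAM_PREFIX "kfence." and mode 0600 above, the interval is
 * exposed as /sys/module/kfence/parameters/sample_interval, e.g.:
 *
 *	echo 0 > /sys/module/kfence/parameters/sample_interval	# disable
 *
 * Note that param_set_sample_interval() rejects re-enabling after boot once
 * KFENCE has been disabled.
 */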

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given sample interval. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}
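
/*
 * Pool layout sketch: 2 guard pages are followed by the object pages, each
 * separated by a further guard page, which is what the "/ (PAGE_SIZE * 2) - 1"
 * index computation in addr_to_metadata() relies on:
 *
 *	---+-------+-------+-------+-------+-------+---
 *	   | guard | guard | obj 0 | guard | obj 1 | ...
 *	---+-------+-------+-------+-------+-------+---
 *
 * An address in the guard page following an object maps to that object's
 * metadata, so out-of-bounds accesses beyond an object can be attributed.
 */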

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected page.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
					   enum kfence_object_state next)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	/*
	 * Skip over 1 (this) function; noinline ensures we do not accidentally
	 * skip over the caller by never inlining.
	 */
	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}
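
/*
 * Note: KFENCE_CANARY_PATTERN (see kfence.h) derives the expected byte from
 * the low bits of the address itself, so that writes of the pattern at the
 * wrong offset are still caught, and corrupted ranges stand out in reports.
 */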

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes
	 * to the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta)
		return NULL;

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, will always
	 * return zero. We still benefit from enabling KFENCE as early as
	 * possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside
	 * is that the out-of-bounds accesses detected are deterministic for
	 * such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}
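
/*
 * Placement example (illustrative, PAGE_SIZE == 4096): a 100-byte object from
 * a cache with 8-byte alignment is either placed at the page start, so that
 * underflows hit the preceding guard page, or at ALIGN_DOWN(4096 - 100, 8) ==
 * 3992, so that overflows run into canary bytes and then the following guard
 * page.
 */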

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * them as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free(), which is done via checking PageSlab().
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove it from the kmemleak object tree, as it would otherwise
	 * overlap with allocations returned by kfence_alloc(), which are
	 * registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
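
/*
 * Example /sys/kernel/debug/kfence/stats output (counts illustrative):
 *
 *	enabled: 1
 *	currently allocated: 45
 *	total allocations: 3320
 *	total frees: 3275
 *	zombie allocations: 0
 *	total bugs: 0
 */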

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is
 * used to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: kfence_timer is declared up front (and initialized below via
 * DECLARE_DELAYED_WORK()) so that toggle_allocation_gate() can re-queue itself.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	/* Enable static key, and await allocation to happen. */
	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
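
/*
 * Timing sketch: roughly every kfence_sample_interval milliseconds the gate is
 * reset to 0; with CONFIG_KFENCE_STATIC_KEYS the fast-path static branch is
 * enabled and the worker sleeps until a single allocation has claimed the
 * gate, after which the branch is disabled again until the next interval.
 */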

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}
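
/*
 * Example: KFENCE is typically controlled with the boot parameter
 * kfence.sample_interval=<ms>; 0 disables it entirely, and with a non-zero
 * CONFIG_KFENCE_SAMPLE_INTERVAL it is enabled by default.
 */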

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages, and also so that the end
			 * of the objects' lifetimes is recorded.
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE)
		return NULL;

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
		return NULL;

	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
	 * cost, in case we have a large number of concurrent allocations.
	 */
	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * The gate was just closed by this allocation; wake up the timer task
	 * waiting in toggle_allocation_gate().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	return kfence_guarded_alloc(s, size, flags);
}
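
/*
 * A minimal sketch (assuming the wrapper in include/linux/kfence.h) of how the
 * slab allocators reach __kfence_alloc() without paying a cost on the fast
 * path; names here mirror that header and are not defined in this file:
 *
 *	static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 *	{
 *	#ifdef CONFIG_KFENCE_STATIC_KEYS
 *		if (static_branch_unlikely(&kfence_allocation_key))
 *	#else
 *		if (unlikely(!atomic_read(&kfence_allocation_gate)))
 *	#endif
 *			return __kfence_alloc(s, size, flags);
 *		return NULL;
 *	}
 */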

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock, we can
		 * still report this as an OOB -- the report will simply show
		 * the stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}
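
/*
 * A minimal sketch of the expected arch integration (illustrative, not quoted
 * from any particular arch): the fault handler calls this early for faults in
 * the KFENCE pool, and skips its normal OOPS path if KFENCE handled them:
 *
 *	if (kfence_handle_page_fault(address, fault_is_write, regs))
 *		return;	// report printed; the faulting access may proceed
 */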