/*
 * DWARF unwinder for the kernel, mainly used for generating
 * stacktrace information. Based on the DWARF 3 specification
 * (http://www.dwarfstd.org).
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2

/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);

static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/* Set once initialisation has completed and the unwinder is safe to call. */
static unsigned int dwarf_unwinder_ready;

/**
 *	dwarf_frame_alloc_reg - allocate memory for a DWARF register
 *	@frame: the DWARF frame whose list of registers we insert on
 *	@reg_num: the register number
 *
 *	Allocate space for, and initialise, a dwarf reg from
 *	dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 *	dwarf registers for @frame.
 *
 *	Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 *	dwarf_frame_reg - return a DWARF register
 *	@frame: the DWARF frame to search in for @reg_num
 *	@reg_num: the register number to search for
 *
 *	Lookup and return the dwarf reg @reg_num for this frame. Return
 *	NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 *	dwarf_read_addr - read dwarf data
 *	@src: source address of data
 *	@dst: destination address to store the data
 *
 *	Read 'n' bytes from @src, where 'n' is the size of an address on
 *	the native machine. We return the number of bytes read, which
 *	should always be 'n'. We also have to be careful when reading
 *	from @src and writing to @dst, because they can be arbitrarily
 *	aligned.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 *	dwarf_read_uleb128 - read unsigned LEB128 data
 *	@addr: the address where the ULEB128 data is stored
 *	@ret: address to store the result
 *
 *	Decode an unsigned LEB128 encoded datum. The algorithm is taken
 *	from Appendix C of the DWARF 3 spec. For information on how
 *	variable length data is encoded in DWARF, see section 7.6 of the
 *	DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
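
/*
 * Worked example (taken from the DWARF spec): the ULEB128 byte
 * sequence 0xe5 0x8e 0x26 decodes as
 *
 *	(0x65 << 0) | (0x0e << 7) | (0x26 << 14) = 624485
 *
 * and dwarf_read_uleb128() returns 3, the number of bytes consumed.
 */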

/**
 *	dwarf_read_leb128 - read signed LEB128 data
 *	@addr: the address of the LEB128 encoded data
 *	@ret: address to store the result
 *
 *	Decode signed LEB128 data. The algorithm is taken from Appendix
 *	C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/* Sign-extend if the sign bit (bit 6) of the final byte is set. */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}
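
/*
 * Worked example: a single 0x7f byte decodes to -1 (bit 6 is set, so
 * the result is sign-extended), and the DWARF spec's example sequence
 * 0x9b 0xf1 0x59 decodes to -624485 in three bytes.
 */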

/**
 *	dwarf_read_encoded_value - return the decoded value at @addr
 *	@addr: the address of the encoded value
 *	@val: where to write the decoded value
 *	@encoding: the encoding with which we can decode @addr
 *
 *	GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 *	the value at @addr using @encoding. The decoded value is written
 *	to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}
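
/*
 * Example (addresses purely illustrative): a DW_EH_PE_pcrel |
 * DW_EH_PE_sdata4 value is stored as a 32-bit delta from its own
 * location, so a datum at 0x8c001000 holding 0xfffff000 decodes,
 * modulo 2^32, to 0x8c001000 + 0xfffff000 = 0x8c000000.
 */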

/**
 *	dwarf_entry_len - return the length of an FDE or CIE
 *	@addr: the address of the entry
 *	@len: the length of the entry
 *
 *	Read the initial_length field of the entry and store the size of
 *	the entry in @len. We return the number of bytes read. Return a
 *	count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field, i.e. 4 bytes in.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}
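
/*
 * Example: an entry beginning 0x0000014c is a 32-bit entry of length
 * 0x14c, for which dwarf_entry_len() returns 4. An entry beginning
 * with the escape value 0xffffffff (DW_EXT_DWARF64) is followed by an
 * 8-byte length, and we return 12.
 */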

/**
 *	dwarf_lookup_cie - locate the cie
 *	@cie_ptr: pointer to help with lookup
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct dwarf_cie *cie = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
		BUG_ON(!cie_tmp);

		if (cie_ptr == cie_tmp->cie_pointer) {
			cie = cie_tmp;
			cached_cie = cie_tmp;
			goto out;
		} else if (cie_ptr < cie_tmp->cie_pointer) {
			rb_node = &(*rb_node)->rb_left;
		} else {
			rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 *	dwarf_lookup_fde - locate the FDE that covers pc
 *	@pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct dwarf_fde *fde = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
		BUG_ON(!fde_tmp);

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		if (pc < tmp_start) {
			rb_node = &(*rb_node)->rb_left;
		} else if (pc < tmp_end) {
			fde = fde_tmp;
			goto out;
		} else {
			rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 *	dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 *	@insn_start: address of the first instruction
 *	@insn_end: address of the last instruction
 *	@cie: the CIE for this function
 *	@fde: the FDE for this function
 *	@frame: the instructions calculate the CFA for this frame
 *	@pc: the program counter of the address we're interested in
 *
 *	Execute the Call Frame instruction sequence starting at
 *	@insn_start and ending at @insn_end. The instructions describe
 *	how to calculate the Canonical Frame Address of a stackframe.
 *	Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions themselves.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}
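
/*
 * A worked example (opcodes and operands purely illustrative): for a
 * function whose prologue grows the stack and saves the return-address
 * register, the FDE instruction stream might read
 *
 *	DW_CFA_advance_loc(2)		frame->pc += 2 * code_alignment_factor
 *	DW_CFA_def_cfa_offset(8)	frame->cfa_offset = 8
 *	DW_CFA_offset(RA, 1)		RA saved at CFA + 1 * data_alignment_factor
 *
 * Execution stops as soon as frame->pc advances past the pc being
 * unwound, so only the rules in effect at that address end up
 * recorded in @frame.
 */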

/**
 *	dwarf_free_frame - free the memory allocated for @frame
 *	@frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

/**
 *	dwarf_unwind_stack - unwind the stack
 *	@pc: address of the function to unwind
 *	@prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 *	Return a struct dwarf_frame representing the most recent frame
 *	on the callstack. Each of the lower (older) stack frames are
 *	linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If we've been called in to before initialization has
	 * completed, bail out immediately.
	 */
	if (!dwarf_unwinder_ready)
		return NULL;

	/*
	 * If we're starting at the top of the stack we need get the
	 * contents of a physical register to get the CFA in order to
	 * begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be setup by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = (unsigned long)current_text_addr();

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		int index = current->curr_ret_stack;

		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(index > 0);

		pc = current->ret_stack[index].ret;
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path. There are two reasons
		 * why we might exit here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF info for pc is corrupt. There's
		 *	probably nothing we can do here apart from
		 *	giving up.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, we're starting from the top of the
			 * stack. We need to physically read the
			 * contents of a register in order to get the
			 * Canonical Frame Address for this function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	/*
	 * Ah, the joys of unwinding through interrupts.
	 *
	 * Interrupts are tricky - the DWARF info needs to be _really_
	 * accurate and unfortunately it often is not. If the previous
	 * frame is the IRQ return path we cannot trust the unwound
	 * return address, so terminate the callstack here rather than
	 * report bogus frames.
	 */
	if (prev && prev->pc == (unsigned long)ret_from_irq)
		frame->return_addr = 0;

	return frame;

bail:
	dwarf_free_frame(frame);
	return NULL;
}
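
/*
 * Usage sketch (mirroring dwarf_unwinder_dump() below): walk the
 * callstack by feeding each frame back in as @prev, freeing frames
 * once they are no longer needed, e.g.
 *
 *	frame = dwarf_unwind_stack(0, NULL);
 *	while (frame && frame->return_addr) {
 *		... record frame->return_addr ...
 *		prev = frame;
 *		frame = dwarf_unwind_stack(prev->return_addr, prev);
 *		dwarf_free_frame(prev);
 *	}
 *	if (frame)
 *		dwarf_free_frame(frame);
 */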

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality routine in the
			 * CIE augmentation, which we can't handle.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to tree */
	spin_lock_irqsave(&dwarf_cie_lock, flags);

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);

		parent = *rb_node;

		if (cie->cie_pointer < cie_tmp->cie_pointer)
			rb_node = &parent->rb_left;
		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&cie->node, parent, rb_node);
	rb_insert_color(&cie->node, &cie_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&cie->link, &mod->arch.cie_list);
#endif

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
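
/*
 * For reference, GCC typically emits .eh_frame CIEs with version 1 and
 * the augmentation string "zR": an augmentation data length followed
 * by a byte describing how FDE addresses are encoded. That is exactly
 * the subset dwarf_parse_cie() handles without tripping UNWINDER_BUG().
 */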

static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the delta
	 * between the address of the field within the FDE and
	 * the CIE itself.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to tree */
	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;
		unsigned long start, end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		parent = *rb_node;

		if (start < tmp_start)
			rb_node = &parent->rb_left;
		else if (start >= tmp_end)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&fde->node, parent, rb_node);
	rb_insert_color(&fde->node, &fde_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&fde->link, &mod->arch.fde_list);
#endif

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame)
			dwarf_free_frame(_frame);

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}

	if (frame)
		dwarf_free_frame(frame);
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void __init dwarf_unwinder_cleanup(void)
{
	struct dwarf_fde *fde, *next_fde;
	struct dwarf_cie *cie, *next_cie;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE trees and remove and free all the
	 * memory associated with those data structures.
	 */
	rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
		kfree(fde);

	rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
		kfree(cie);

	mempool_destroy(dwarf_reg_pool);
	mempool_destroy(dwarf_frame_pool);
	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 *	dwarf_parse_section - parse DWARF section
 *	@eh_frame_start: start address of the .eh_frame section
 *	@eh_frame_end: end address of the .eh_frame section
 *	@mod: the kernel module containing the .eh_frame section
 *
 *	Parse the information in a .eh_frame section.
 */
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
			       struct module *mod)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len = 0;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	c_entries = 0;
	f_entries = 0;
	entry = eh_frame_start;

	while ((char *)entry < eh_frame_end) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end, mod);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len,
					      end, mod);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	return 0;

out:
	return err;
}
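
/*
 * For reference, the .eh_frame layout that dwarf_parse_section() walks
 * (a sketch; fields are 32-bit unless the DWARF64 escape is used):
 *
 *	+----------------+
 *	| length         |  <- dwarf_entry_len(), excludes itself
 *	+----------------+
 *	| CIE id / ptr   |  <- 0 (DW_EH_FRAME_CIE) for a CIE, otherwise
 *	+----------------+     the self-relative offset back to the CIE
 *	| contents ...   |  <- 'len' bytes, ending at 'end'
 *	+----------------+
 */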

#ifdef CONFIG_MODULES
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			  struct module *me)
{
	unsigned int i;
	int err;
	unsigned long start, end;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	start = end = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Alloc bit cleared means "ignore it." */
		if ((sechdrs[i].sh_flags & SHF_ALLOC)
		    && !strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame")) {
			start = sechdrs[i].sh_addr;
			end = start + sechdrs[i].sh_size;
			break;
		}
	}

	/* Did we find the .eh_frame section? */
	if (i != hdr->e_shnum) {
		INIT_LIST_HEAD(&me->arch.cie_list);
		INIT_LIST_HEAD(&me->arch.fde_list);
		err = dwarf_parse_section((char *)start, (char *)end, me);
		if (err) {
			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
			       me->name);
			return err;
		}
	}

	return 0;
}

/**
 *	module_dwarf_cleanup - remove FDE/CIEs associated with @mod
 *	@mod: the module that is being unloaded
 *
 *	Remove any FDEs and CIEs from the global trees that came from
 *	@mod's .eh_frame section because @mod is being unloaded.
 */
void module_dwarf_cleanup(struct module *mod)
{
	struct dwarf_fde *fde, *ftmp;
	struct dwarf_cie *cie, *ctmp;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
		list_del(&cie->link);
		rb_erase(&cie->node, &cie_root);
		kfree(cie);
	}

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
		list_del(&fde->link);
		rb_erase(&fde->node, &fde_root);
		kfree(fde);
	}

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */

/**
 *	dwarf_unwinder_init - initialise the dwarf unwinder
 *
 *	Build the data structures describing the .eh_frame section to
 *	make it easier to lookup CIE and FDE entries. Because the
 *	.eh_frame section is packed as tightly as possible it is not
 *	easy to lookup the FDE for a given PC, so we build rb-trees of
 *	FDE and CIE entries to make lookups fast.
 */
static int __init dwarf_unwinder_init(void)
{
	int err = -ENOMEM;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);

	dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
						    dwarf_frame_cachep);
	if (!dwarf_frame_pool)
		goto out;

	dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
						  dwarf_reg_cachep);
	if (!dwarf_reg_pool)
		goto out;

	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
	if (err)
		goto out;

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	dwarf_unwinder_ready = 1;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return err;
}
early_initcall(dwarf_unwinder_init);