1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/vmalloc.h>
24#include <linux/jhash.h>
25#include <linux/slab.h>
26#include <linux/sort.h>
27
28#include "tracing_map.h"
29#include "trace.h"
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
48{
49 atomic64_add(n, &elt->fields[i].sum);
50}
51
52
53
54
55
56
57
58
59
60
61
62
63
64u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
65{
66 return (u64)atomic64_read(&elt->fields[i].sum);
67}
68
/*
 * tracing_map_cmp_string - Comparator for NUL-terminated string key fields.
 *
 * Follows the usual <0/0/>0 comparator convention via strcmp().
 */
int tracing_map_cmp_string(void *val_a, void *val_b)
{
	return strcmp((char *)val_a, (char *)val_b);
}
76
/*
 * tracing_map_cmp_none - No-op comparator: every pair compares equal.
 */
int tracing_map_cmp_none(void *val_a, void *val_b)
{
	(void)val_a;
	(void)val_b;

	return 0;
}
81
82static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
83{
84 u64 a = atomic64_read((atomic64_t *)val_a);
85 u64 b = atomic64_read((atomic64_t *)val_b);
86
87 return (a > b) ? 1 : ((a < b) ? -1 : 0);
88}
89
/*
 * Generate a comparator for a fixed-width scalar type.  The void
 * pointers are assumed to point at values of 'type'; the result
 * follows the usual <0/0/>0 comparator convention.
 */
#define DEFINE_TRACING_MAP_CMP_FN(type)					\
static int tracing_map_cmp_##type(void *val_a, void *val_b)		\
{									\
	type a = *(type *)val_a;					\
	type b = *(type *)val_b;					\
									\
	return (a > b) ? 1 : ((a < b) ? -1 : 0);			\
}

/* One comparator per signed/unsigned width, selected by tracing_map_cmp_num(). */
DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);
107
108tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
109 int field_is_signed)
110{
111 tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
112
113 switch (field_size) {
114 case 8:
115 if (field_is_signed)
116 fn = tracing_map_cmp_s64;
117 else
118 fn = tracing_map_cmp_u64;
119 break;
120 case 4:
121 if (field_is_signed)
122 fn = tracing_map_cmp_s32;
123 else
124 fn = tracing_map_cmp_u32;
125 break;
126 case 2:
127 if (field_is_signed)
128 fn = tracing_map_cmp_s16;
129 else
130 fn = tracing_map_cmp_u16;
131 break;
132 case 1:
133 if (field_is_signed)
134 fn = tracing_map_cmp_s8;
135 else
136 fn = tracing_map_cmp_u8;
137 break;
138 }
139
140 return fn;
141}
142
143static int tracing_map_add_field(struct tracing_map *map,
144 tracing_map_cmp_fn_t cmp_fn)
145{
146 int ret = -EINVAL;
147
148 if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
149 ret = map->n_fields;
150 map->fields[map->n_fields++].cmp_fn = cmp_fn;
151 }
152
153 return ret;
154}
155
156
157
158
159
160
161
162
163
164
165
166
167
/**
 * tracing_map_add_sum_field - Add a field describing a tracing_map sum
 * @map: The tracing_map
 *
 * Adds a sum field to the map; sum fields use the atomic64 comparator,
 * which is also how the rest of the code distinguishes sums from keys.
 *
 * Return: The index identifying the field in the map and associated
 * tracing_map_elts, or -EINVAL if all field slots are in use.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189int tracing_map_add_key_field(struct tracing_map *map,
190 unsigned int offset,
191 tracing_map_cmp_fn_t cmp_fn)
192
193{
194 int idx = tracing_map_add_field(map, cmp_fn);
195
196 if (idx < 0)
197 return idx;
198
199 map->fields[idx].offset = offset;
200
201 map->key_idx[map->n_keys++] = idx;
202
203 return idx;
204}
205
206void tracing_map_array_clear(struct tracing_map_array *a)
207{
208 unsigned int i;
209
210 if (!a->pages)
211 return;
212
213 for (i = 0; i < a->n_pages; i++)
214 memset(a->pages[i], 0, PAGE_SIZE);
215}
216
217void tracing_map_array_free(struct tracing_map_array *a)
218{
219 unsigned int i;
220
221 if (!a)
222 return;
223
224 if (!a->pages)
225 goto free;
226
227 for (i = 0; i < a->n_pages; i++) {
228 if (!a->pages[i])
229 break;
230 free_page((unsigned long)a->pages[i]);
231 }
232
233 kfree(a->pages);
234
235 free:
236 kfree(a);
237}
238
239struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
240 unsigned int entry_size)
241{
242 struct tracing_map_array *a;
243 unsigned int i;
244
245 a = kzalloc(sizeof(*a), GFP_KERNEL);
246 if (!a)
247 return NULL;
248
249 a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
250 a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
251 a->n_pages = n_elts / a->entries_per_page;
252 if (!a->n_pages)
253 a->n_pages = 1;
254 a->entry_shift = fls(a->entries_per_page) - 1;
255 a->entry_mask = (1 << a->entry_shift) - 1;
256
257 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
258 if (!a->pages)
259 goto free;
260
261 for (i = 0; i < a->n_pages; i++) {
262 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
263 if (!a->pages[i])
264 goto free;
265 }
266 out:
267 return a;
268 free:
269 tracing_map_array_free(a);
270 a = NULL;
271
272 goto out;
273}
274
275static void tracing_map_elt_clear(struct tracing_map_elt *elt)
276{
277 unsigned i;
278
279 for (i = 0; i < elt->map->n_fields; i++)
280 if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
281 atomic64_set(&elt->fields[i].sum, 0);
282
283 if (elt->map->ops && elt->map->ops->elt_clear)
284 elt->map->ops->elt_clear(elt);
285}
286
287static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
288{
289 unsigned int i;
290
291 tracing_map_elt_clear(elt);
292
293 for (i = 0; i < elt->map->n_fields; i++) {
294 elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;
295
296 if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
297 elt->fields[i].offset = elt->map->fields[i].offset;
298 }
299}
300
301static void tracing_map_elt_free(struct tracing_map_elt *elt)
302{
303 if (!elt)
304 return;
305
306 if (elt->map->ops && elt->map->ops->elt_free)
307 elt->map->ops->elt_free(elt);
308 kfree(elt->fields);
309 kfree(elt->key);
310 kfree(elt);
311}
312
313static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
314{
315 struct tracing_map_elt *elt;
316 int err = 0;
317
318 elt = kzalloc(sizeof(*elt), GFP_KERNEL);
319 if (!elt)
320 return ERR_PTR(-ENOMEM);
321
322 elt->map = map;
323
324 elt->key = kzalloc(map->key_size, GFP_KERNEL);
325 if (!elt->key) {
326 err = -ENOMEM;
327 goto free;
328 }
329
330 elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
331 if (!elt->fields) {
332 err = -ENOMEM;
333 goto free;
334 }
335
336 tracing_map_elt_init_fields(elt);
337
338 if (map->ops && map->ops->elt_alloc) {
339 err = map->ops->elt_alloc(elt);
340 if (err)
341 goto free;
342 }
343 return elt;
344 free:
345 tracing_map_elt_free(elt);
346
347 return ERR_PTR(err);
348}
349
350static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
351{
352 struct tracing_map_elt *elt = NULL;
353 int idx;
354
355 idx = atomic_inc_return(&map->next_elt);
356 if (idx < map->max_elts) {
357 elt = *(TRACING_MAP_ELT(map->elts, idx));
358 if (map->ops && map->ops->elt_init)
359 map->ops->elt_init(elt);
360 }
361
362 return elt;
363}
364
365static void tracing_map_free_elts(struct tracing_map *map)
366{
367 unsigned int i;
368
369 if (!map->elts)
370 return;
371
372 for (i = 0; i < map->max_elts; i++) {
373 tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
374 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
375 }
376
377 tracing_map_array_free(map->elts);
378 map->elts = NULL;
379}
380
381static int tracing_map_alloc_elts(struct tracing_map *map)
382{
383 unsigned int i;
384
385 map->elts = tracing_map_array_alloc(map->max_elts,
386 sizeof(struct tracing_map_elt *));
387 if (!map->elts)
388 return -ENOMEM;
389
390 for (i = 0; i < map->max_elts; i++) {
391 *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
392 if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
393 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
394 tracing_map_free_elts(map);
395
396 return -ENOMEM;
397 }
398 }
399
400 return 0;
401}
402
/*
 * keys_match - Return true if two composite keys are byte-identical.
 */
static inline bool keys_match(void *key, void *test_key, unsigned key_size)
{
	return !memcmp(key, test_key, key_size);
}
412
/*
 * __tracing_map_insert - Lock-free lookup/insert into the hash table.
 *
 * Hashes @key and open-addresses (linear probing) through map->map.
 * A slot whose stored hash and full key both match yields the existing
 * element.  Otherwise, unless @lookup_only, the first empty slot is
 * claimed with cmpxchg() and bound to a free element from the
 * preallocated pool.
 *
 * Returns the element, or NULL if @lookup_only and the key is absent,
 * or if the element pool is exhausted (counted in map->drops).
 */
static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
	u32 idx, key_hash, test_key;
	struct tracing_map_entry *entry;

	key_hash = jhash(key, map->key_size, 0);
	/* A stored hash of 0 means "empty slot", so never use 0. */
	if (key_hash == 0)
		key_hash = 1;
	/* map_size == 1 << (map_bits + 1): top hash bits pick the start. */
	idx = key_hash >> (32 - (map->map_bits + 1));

	while (1) {
		idx &= (map->map_size - 1);
		entry = TRACING_MAP_ENTRY(map->map, idx);
		test_key = entry->key;

		/*
		 * entry->val may still be NULL if a concurrent claimant
		 * has written the key but not yet bound an element; the
		 * full-key memcmp also guards against hash collisions.
		 */
		if (test_key && test_key == key_hash && entry->val &&
		    keys_match(key, entry->val->key, map->key_size)) {
			if (!lookup_only)
				atomic64_inc(&map->hits);
			return entry->val;
		}

		if (!test_key) {
			if (lookup_only)
				break;

			/* Atomically claim the empty slot: 0 -> key_hash. */
			if (!cmpxchg(&entry->key, 0, key_hash)) {
				struct tracing_map_elt *elt;

				elt = get_free_elt(map);
				if (!elt) {
					/* Pool exhausted: release the slot. */
					atomic64_inc(&map->drops);
					entry->key = 0;
					break;
				}

				memcpy(elt->key, key, map->key_size);
				entry->val = elt;
				atomic64_inc(&map->hits);

				return entry->val;
			}
			/*
			 * Lost the claim race: keep probing.  If the
			 * winner inserted the same key, a duplicate
			 * entry may result; duplicates are coalesced at
			 * sort time by merge_dups().
			 */
		}

		idx++;
	}

	return NULL;
}
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert
 *
 * Inserts @key into @map, claiming a free element for it if not
 * already present, and returns the associated tracing_map_elt.
 * Returns NULL if the preallocated element pool is exhausted (the
 * attempt is recorded in map->drops).  Successful inserts and
 * lookups of existing keys bump map->hits.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up
 *
 * Looks up @key without inserting: returns the associated
 * tracing_map_elt if the key is present, NULL otherwise.  Does not
 * modify the hit/drop counters.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}
527
528
529
530
531
532
533
534
535
536
537
/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees the element pool, the hash-entry array and the map itself.
 * NULL is a no-op.
 *
 * NOTE(review): there is no locking here -- callers are presumably
 * expected to have quiesced all inserters/readers first; confirm at
 * call sites.
 */
void tracing_map_destroy(struct tracing_map *map)
{
	if (!map)
		return;

	tracing_map_free_elts(map);

	tracing_map_array_free(map->map);
	kfree(map);
}
548
549
550
551
552
553
554
555
556
557
558
559
/**
 * tracing_map_clear - Reset a tracing_map back to its just-created state
 * @map: The tracing_map to clear
 *
 * Resets the element-allocation cursor and the hit/drop counters,
 * zeroes every hash-table entry, and clears each element's sums (plus
 * client state via the elt_clear() callback).
 */
void tracing_map_clear(struct tracing_map *map)
{
	unsigned int i;

	/* -1 so the first atomic_inc_return() in get_free_elt() yields 0. */
	atomic_set(&map->next_elt, -1);
	atomic64_set(&map->hits, 0);
	atomic64_set(&map->drops, 0);

	tracing_map_array_clear(map->map);

	for (i = 0; i < map->max_elts; i++)
		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}
573
/*
 * set_sort_key - Install @sort_key as the map's current sort key.
 *
 * The cmp_entries_sum()/cmp_entries_key() comparators read the sort
 * key from elt->map->sort_key, so it must be installed before sort()
 * is called.  Plain (unsynchronized) assignment: not safe against
 * concurrent sorters of the same map.
 */
static void set_sort_key(struct tracing_map *map,
			 struct tracing_map_sort_key *sort_key)
{
	map->sort_key = *sort_key;
}
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630struct tracing_map *tracing_map_create(unsigned int map_bits,
631 unsigned int key_size,
632 const struct tracing_map_ops *ops,
633 void *private_data)
634{
635 struct tracing_map *map;
636 unsigned int i;
637
638 if (map_bits < TRACING_MAP_BITS_MIN ||
639 map_bits > TRACING_MAP_BITS_MAX)
640 return ERR_PTR(-EINVAL);
641
642 map = kzalloc(sizeof(*map), GFP_KERNEL);
643 if (!map)
644 return ERR_PTR(-ENOMEM);
645
646 map->map_bits = map_bits;
647 map->max_elts = (1 << map_bits);
648 atomic_set(&map->next_elt, -1);
649
650 map->map_size = (1 << (map_bits + 1));
651 map->ops = ops;
652
653 map->private_data = private_data;
654
655 map->map = tracing_map_array_alloc(map->map_size,
656 sizeof(struct tracing_map_entry));
657 if (!map->map)
658 goto free;
659
660 map->key_size = key_size;
661 for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
662 map->key_idx[i] = -1;
663 out:
664 return map;
665 free:
666 tracing_map_destroy(map);
667 map = ERR_PTR(-ENOMEM);
668
669 goto out;
670}
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691int tracing_map_init(struct tracing_map *map)
692{
693 int err;
694
695 if (map->n_fields < 2)
696 return -EINVAL;
697
698 err = tracing_map_alloc_elts(map);
699 if (err)
700 return err;
701
702 tracing_map_clear(map);
703
704 return err;
705}
706
707static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
708 const struct tracing_map_sort_entry **b)
709{
710 int ret = 0;
711
712 if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
713 ret = 1;
714
715 return ret;
716}
717
718static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
719 const struct tracing_map_sort_entry **b)
720{
721 const struct tracing_map_elt *elt_a, *elt_b;
722 struct tracing_map_sort_key *sort_key;
723 struct tracing_map_field *field;
724 tracing_map_cmp_fn_t cmp_fn;
725 void *val_a, *val_b;
726 int ret = 0;
727
728 elt_a = (*a)->elt;
729 elt_b = (*b)->elt;
730
731 sort_key = &elt_a->map->sort_key;
732
733 field = &elt_a->fields[sort_key->field_idx];
734 cmp_fn = field->cmp_fn;
735
736 val_a = &elt_a->fields[sort_key->field_idx].sum;
737 val_b = &elt_b->fields[sort_key->field_idx].sum;
738
739 ret = cmp_fn(val_a, val_b);
740 if (sort_key->descending)
741 ret = -ret;
742
743 return ret;
744}
745
746static int cmp_entries_key(const struct tracing_map_sort_entry **a,
747 const struct tracing_map_sort_entry **b)
748{
749 const struct tracing_map_elt *elt_a, *elt_b;
750 struct tracing_map_sort_key *sort_key;
751 struct tracing_map_field *field;
752 tracing_map_cmp_fn_t cmp_fn;
753 void *val_a, *val_b;
754 int ret = 0;
755
756 elt_a = (*a)->elt;
757 elt_b = (*b)->elt;
758
759 sort_key = &elt_a->map->sort_key;
760
761 field = &elt_a->fields[sort_key->field_idx];
762
763 cmp_fn = field->cmp_fn;
764
765 val_a = elt_a->key + field->offset;
766 val_b = elt_b->key + field->offset;
767
768 ret = cmp_fn(val_a, val_b);
769 if (sort_key->descending)
770 ret = -ret;
771
772 return ret;
773}
774
775static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
776{
777 if (!entry)
778 return;
779
780 if (entry->elt_copied)
781 tracing_map_elt_free(entry->elt);
782
783 kfree(entry);
784}
785
786
787
788
789
790
791
792
/**
 * tracing_map_destroy_sort_entries - Free an array of sort entries
 * @entries: The entries to destroy, as produced by tracing_map_sort_entries()
 * @n_entries: The number of entries in the array
 *
 * Frees each sort entry (and any privately copied elt it owns), then
 * the vmalloc'd array itself.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
				      unsigned int n_entries)
{
	unsigned int i = 0;

	while (i < n_entries)
		destroy_sort_entry(entries[i++]);

	vfree(entries);
}
803
804static struct tracing_map_sort_entry *
805create_sort_entry(void *key, struct tracing_map_elt *elt)
806{
807 struct tracing_map_sort_entry *sort_entry;
808
809 sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
810 if (!sort_entry)
811 return NULL;
812
813 sort_entry->key = key;
814 sort_entry->elt = elt;
815
816 return sort_entry;
817}
818
819static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
820{
821 struct tracing_map_elt *dup_elt;
822 unsigned int i;
823
824 dup_elt = tracing_map_elt_alloc(elt->map);
825 if (IS_ERR(dup_elt))
826 return NULL;
827
828 if (elt->map->ops && elt->map->ops->elt_copy)
829 elt->map->ops->elt_copy(dup_elt, elt);
830
831 dup_elt->private_data = elt->private_data;
832 memcpy(dup_elt->key, elt->key, elt->map->key_size);
833
834 for (i = 0; i < elt->map->n_fields; i++) {
835 atomic64_set(&dup_elt->fields[i].sum,
836 atomic64_read(&elt->fields[i].sum));
837 dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
838 }
839
840 return dup_elt;
841}
842
843static int merge_dup(struct tracing_map_sort_entry **sort_entries,
844 unsigned int target, unsigned int dup)
845{
846 struct tracing_map_elt *target_elt, *elt;
847 bool first_dup = (target - dup) == 1;
848 int i;
849
850 if (first_dup) {
851 elt = sort_entries[target]->elt;
852 target_elt = copy_elt(elt);
853 if (!target_elt)
854 return -ENOMEM;
855 sort_entries[target]->elt = target_elt;
856 sort_entries[target]->elt_copied = true;
857 } else
858 target_elt = sort_entries[target]->elt;
859
860 elt = sort_entries[dup]->elt;
861
862 for (i = 0; i < elt->map->n_fields; i++)
863 atomic64_add(atomic64_read(&elt->fields[i].sum),
864 &target_elt->fields[i].sum);
865
866 sort_entries[dup]->dup = true;
867
868 return 0;
869}
870
/*
 * merge_dups - Coalesce sort entries that share the same key.
 *
 * Duplicate keys can exist because the lock-free insert path may bind
 * the same key to two hash slots under contention (see the claim-race
 * note in __tracing_map_insert()).  Sort so equal keys are adjacent,
 * fold each run into its first entry via merge_dup(), then compact
 * the array, freeing the merged-away entries.
 *
 * Returns the number of duplicates removed, or a negative error.
 */
static int merge_dups(struct tracing_map_sort_entry **sort_entries,
		      int n_entries, unsigned int key_size)
{
	unsigned int dups = 0, total_dups = 0;
	int err, i, j;
	void *key;

	if (n_entries < 2)
		return total_dups;

	/* Group equal keys together; any consistent order will do. */
	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);

	key = sort_entries[0]->key;
	for (i = 1; i < n_entries; i++) {
		if (!memcmp(sort_entries[i]->key, key, key_size)) {
			/* i - dups is the first entry of the current run. */
			dups++; total_dups++;
			err = merge_dup(sort_entries, i - dups, i);
			if (err)
				return err;
			continue;
		}
		key = sort_entries[i]->key;
		dups = 0;
	}

	if (!total_dups)
		return total_dups;

	/* Compact: keep non-dups in order, free the merged entries. */
	for (i = 0, j = 0; i < n_entries; i++) {
		if (!sort_entries[i]->dup) {
			sort_entries[j] = sort_entries[i];
			if (j++ != i)
				sort_entries[i] = NULL;
		} else {
			destroy_sort_entry(sort_entries[i]);
			sort_entries[i] = NULL;
		}
	}

	return total_dups;
}
913
914static bool is_key(struct tracing_map *map, unsigned int field_idx)
915{
916 unsigned int i;
917
918 for (i = 0; i < map->n_keys; i++)
919 if (map->key_idx[i] == field_idx)
920 return true;
921 return false;
922}
923
/*
 * sort_secondary - Re-sort runs of primary-equal entries by a second key.
 *
 * @entries is already sorted by @primary_key.  Walk the array, find
 * each maximal run of entries that compare equal on the primary key,
 * and sort() just that sub-range using @secondary_key.  The map's
 * sort_key is temporarily switched to the secondary key around each
 * sub-sort because the comparators read it from the map.
 */
static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const struct tracing_map_sort_entry **,
			  const struct tracing_map_sort_entry **);
	int (*secondary_fn)(const struct tracing_map_sort_entry **,
			    const struct tracing_map_sort_entry **);
	unsigned i, start = 0, n_sub = 1;

	/* Key fields compare by key bytes, all other fields by sum. */
	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		if (primary_fn(a, b) == 0) {
			/* Extend the run, unless it reaches the array end. */
			n_sub++;
			if (i < n_entries - 2)
				continue;
		}

		/* Runs of length 1 need no secondary sort. */
		if (n_sub < 2) {
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
/**
 * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
 * @map: The tracing_map
 * @sort_keys: The sort key(s) to sort by
 * @n_sort_keys: The number of sort keys (only the first two are used)
 * @sort_entries: outval: pointer to the array of sorted sort entries
 *
 * Snapshots the occupied hash entries into an array of sort entries,
 * merges duplicate keys, sorts by the primary key, and optionally
 * re-sorts runs of primary-equal entries by a secondary key.  On
 * success *@sort_entries points at the array; the caller must release
 * it with tracing_map_destroy_sort_entries().
 *
 * Return: the number of sort entries (>= 0), or a negative error.
 */
int tracing_map_sort_entries(struct tracing_map *map,
			     struct tracing_map_sort_key *sort_keys,
			     unsigned int n_sort_keys,
			     struct tracing_map_sort_entry ***sort_entries)
{
	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
			      const struct tracing_map_sort_entry **);
	struct tracing_map_sort_entry *sort_entry, **entries;
	int i, n_entries, ret;

	/* At most max_elts hash entries can ever hold a bound element. */
	entries = vmalloc(map->max_elts * sizeof(sort_entry));
	if (!entries)
		return -ENOMEM;

	for (i = 0, n_entries = 0; i < map->map_size; i++) {
		struct tracing_map_entry *entry;

		entry = TRACING_MAP_ENTRY(map->map, i);

		/* Skip empty slots and slots claimed but not yet bound. */
		if (!entry->key || !entry->val)
			continue;

		entries[n_entries] = create_sort_entry(entry->val->key,
						       entry->val);
		if (!entries[n_entries++]) {
			ret = -ENOMEM;
			goto free;
		}
	}

	if (n_entries == 0) {
		ret = 0;
		goto free;
	}

	/* A single entry needs neither dedup nor sorting. */
	if (n_entries == 1) {
		*sort_entries = entries;
		return 1;
	}

	ret = merge_dups(entries, n_entries, map->key_size);
	if (ret < 0)
		goto free;
	n_entries -= ret;

	/* Key fields compare by key bytes, all other fields by sum. */
	if (is_key(map, sort_keys[0].field_idx))
		cmp_entries_fn = cmp_entries_key;
	else
		cmp_entries_fn = cmp_entries_sum;

	/* The comparators fetch the sort key from the map itself. */
	set_sort_key(map, &sort_keys[0]);

	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

	if (n_sort_keys > 1)
		sort_secondary(map,
			       (const struct tracing_map_sort_entry **)entries,
			       n_entries,
			       &sort_keys[0],
			       &sort_keys[1]);

	*sort_entries = entries;

	return n_entries;
 free:
	/* n_entries includes the NULL slot on create failure; that's OK. */
	tracing_map_destroy_sort_entries(entries, n_entries);

	return ret;
}
1067