1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/vmalloc.h>
24#include <linux/jhash.h>
25#include <linux/slab.h>
26#include <linux/sort.h>
27
28#include "tracing_map.h"
29#include "trace.h"
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
48{
49 atomic64_add(n, &elt->fields[i].sum);
50}
51
52
53
54
55
56
57
58
59
60
61
62
63
64u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
65{
66 return (u64)atomic64_read(&elt->fields[i].sum);
67}
68
/* Comparator for NUL-terminated string key fields: plain strcmp() order. */
int tracing_map_cmp_string(void *val_a, void *val_b)
{
	return strcmp((char *)val_a, (char *)val_b);
}
76
/* No-op comparator: treats every pair of values as equal. */
int tracing_map_cmp_none(void *val_a, void *val_b)
{
	(void)val_a;
	(void)val_b;

	return 0;
}
81
82static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
83{
84 u64 a = atomic64_read((atomic64_t *)val_a);
85 u64 b = atomic64_read((atomic64_t *)val_b);
86
87 return (a > b) ? 1 : ((a < b) ? -1 : 0);
88}
89
90#define DEFINE_TRACING_MAP_CMP_FN(type) \
91static int tracing_map_cmp_##type(void *val_a, void *val_b) \
92{ \
93 type a = *(type *)val_a; \
94 type b = *(type *)val_b; \
95 \
96 return (a > b) ? 1 : ((a < b) ? -1 : 0); \
97}
98
99DEFINE_TRACING_MAP_CMP_FN(s64);
100DEFINE_TRACING_MAP_CMP_FN(u64);
101DEFINE_TRACING_MAP_CMP_FN(s32);
102DEFINE_TRACING_MAP_CMP_FN(u32);
103DEFINE_TRACING_MAP_CMP_FN(s16);
104DEFINE_TRACING_MAP_CMP_FN(u16);
105DEFINE_TRACING_MAP_CMP_FN(s8);
106DEFINE_TRACING_MAP_CMP_FN(u8);
107
108tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
109 int field_is_signed)
110{
111 tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
112
113 switch (field_size) {
114 case 8:
115 if (field_is_signed)
116 fn = tracing_map_cmp_s64;
117 else
118 fn = tracing_map_cmp_u64;
119 break;
120 case 4:
121 if (field_is_signed)
122 fn = tracing_map_cmp_s32;
123 else
124 fn = tracing_map_cmp_u32;
125 break;
126 case 2:
127 if (field_is_signed)
128 fn = tracing_map_cmp_s16;
129 else
130 fn = tracing_map_cmp_u16;
131 break;
132 case 1:
133 if (field_is_signed)
134 fn = tracing_map_cmp_s8;
135 else
136 fn = tracing_map_cmp_u8;
137 break;
138 }
139
140 return fn;
141}
142
143static int tracing_map_add_field(struct tracing_map *map,
144 tracing_map_cmp_fn_t cmp_fn)
145{
146 int ret = -EINVAL;
147
148 if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
149 ret = map->n_fields;
150 map->fields[map->n_fields++].cmp_fn = cmp_fn;
151 }
152
153 return ret;
154}
155
156
157
158
159
160
161
162
163
164
165
166
167
/**
 * tracing_map_add_sum_field - Add a field describing a tracing_map sum
 * @map: The tracing_map
 *
 * Adds a sum field to the map - a field whose per-element value is an
 * atomic64 sum, compared with tracing_map_cmp_atomic64 (updated and
 * read via tracing_map_update_sum()/tracing_map_read_sum()).
 *
 * Return: The index of the field added to the map, or -EINVAL if the
 * map's field table is already full.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189int tracing_map_add_key_field(struct tracing_map *map,
190 unsigned int offset,
191 tracing_map_cmp_fn_t cmp_fn)
192
193{
194 int idx = tracing_map_add_field(map, cmp_fn);
195
196 if (idx < 0)
197 return idx;
198
199 map->fields[idx].offset = offset;
200
201 map->key_idx[map->n_keys++] = idx;
202
203 return idx;
204}
205
206void tracing_map_array_clear(struct tracing_map_array *a)
207{
208 unsigned int i;
209
210 if (!a->pages)
211 return;
212
213 for (i = 0; i < a->n_pages; i++)
214 memset(a->pages[i], 0, PAGE_SIZE);
215}
216
217void tracing_map_array_free(struct tracing_map_array *a)
218{
219 unsigned int i;
220
221 if (!a)
222 return;
223
224 if (!a->pages)
225 goto free;
226
227 for (i = 0; i < a->n_pages; i++) {
228 if (!a->pages[i])
229 break;
230 free_page((unsigned long)a->pages[i]);
231 }
232
233 kfree(a->pages);
234
235 free:
236 kfree(a);
237}
238
239struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
240 unsigned int entry_size)
241{
242 struct tracing_map_array *a;
243 unsigned int i;
244
245 a = kzalloc(sizeof(*a), GFP_KERNEL);
246 if (!a)
247 return NULL;
248
249 a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
250 a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
251 a->n_pages = n_elts / a->entries_per_page;
252 if (!a->n_pages)
253 a->n_pages = 1;
254 a->entry_shift = fls(a->entries_per_page) - 1;
255 a->entry_mask = (1 << a->entry_shift) - 1;
256
257 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
258 if (!a->pages)
259 goto free;
260
261 for (i = 0; i < a->n_pages; i++) {
262 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
263 if (!a->pages[i])
264 goto free;
265 }
266 out:
267 return a;
268 free:
269 tracing_map_array_free(a);
270 a = NULL;
271
272 goto out;
273}
274
275static void tracing_map_elt_clear(struct tracing_map_elt *elt)
276{
277 unsigned i;
278
279 for (i = 0; i < elt->map->n_fields; i++)
280 if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
281 atomic64_set(&elt->fields[i].sum, 0);
282
283 if (elt->map->ops && elt->map->ops->elt_clear)
284 elt->map->ops->elt_clear(elt);
285}
286
287static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
288{
289 unsigned int i;
290
291 tracing_map_elt_clear(elt);
292
293 for (i = 0; i < elt->map->n_fields; i++) {
294 elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;
295
296 if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
297 elt->fields[i].offset = elt->map->fields[i].offset;
298 }
299}
300
/*
 * Free an element and everything it owns.  Safe on NULL and on a
 * partially constructed element (kfree() tolerates NULL members).
 */
static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
	if (!elt)
		return;

	/* Let the client release its per-element state first, while the
	 * rest of the element is still intact. */
	if (elt->map->ops && elt->map->ops->elt_free)
		elt->map->ops->elt_free(elt);
	kfree(elt->fields);
	kfree(elt->key);
	kfree(elt);
}
312
313static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
314{
315 struct tracing_map_elt *elt;
316 int err = 0;
317
318 elt = kzalloc(sizeof(*elt), GFP_KERNEL);
319 if (!elt)
320 return ERR_PTR(-ENOMEM);
321
322 elt->map = map;
323
324 elt->key = kzalloc(map->key_size, GFP_KERNEL);
325 if (!elt->key) {
326 err = -ENOMEM;
327 goto free;
328 }
329
330 elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
331 if (!elt->fields) {
332 err = -ENOMEM;
333 goto free;
334 }
335
336 tracing_map_elt_init_fields(elt);
337
338 if (map->ops && map->ops->elt_alloc) {
339 err = map->ops->elt_alloc(elt);
340 if (err)
341 goto free;
342 }
343 return elt;
344 free:
345 tracing_map_elt_free(elt);
346
347 return ERR_PTR(err);
348}
349
350static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
351{
352 struct tracing_map_elt *elt = NULL;
353 int idx;
354
355 idx = atomic_inc_return(&map->next_elt);
356 if (idx < map->max_elts) {
357 elt = *(TRACING_MAP_ELT(map->elts, idx));
358 if (map->ops && map->ops->elt_init)
359 map->ops->elt_init(elt);
360 }
361
362 return elt;
363}
364
365static void tracing_map_free_elts(struct tracing_map *map)
366{
367 unsigned int i;
368
369 if (!map->elts)
370 return;
371
372 for (i = 0; i < map->max_elts; i++) {
373 tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
374 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
375 }
376
377 tracing_map_array_free(map->elts);
378 map->elts = NULL;
379}
380
381static int tracing_map_alloc_elts(struct tracing_map *map)
382{
383 unsigned int i;
384
385 map->elts = tracing_map_array_alloc(map->max_elts,
386 sizeof(struct tracing_map_elt *));
387 if (!map->elts)
388 return -ENOMEM;
389
390 for (i = 0; i < map->max_elts; i++) {
391 *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
392 if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
393 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
394 tracing_map_free_elts(map);
395
396 return -ENOMEM;
397 }
398 }
399
400 return 0;
401}
402
/* Two keys match iff their first key_size bytes are identical. */
static inline bool keys_match(void *key, void *test_key, unsigned key_size)
{
	return memcmp(key, test_key, key_size) == 0;
}
412
/*
 * Core lock-free lookup/insert over the open-addressed entry array.
 *
 * The key is hashed with jhash(); the entry array is linearly probed
 * starting at an index derived from the top map_bits + 1 bits of the
 * hash.  An entry's key slot stores the hash of its element's key
 * (0 means "empty", which is why a computed hash of 0 is remapped to
 * 1).  An empty slot is claimed with cmpxchg() so two concurrent
 * inserters can't both take it; entry->val is only set after the
 * element's key has been filled in.
 *
 * Returns the matching (or newly inserted) element, or NULL when
 * @lookup_only and the key is absent, or when no free elements remain.
 */
static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
	u32 idx, key_hash, test_key;
	struct tracing_map_entry *entry;

	key_hash = jhash(key, map->key_size, 0);
	if (key_hash == 0)
		key_hash = 1;	/* 0 is reserved to mean "empty slot" */
	idx = key_hash >> (32 - (map->map_bits + 1));

	while (1) {
		idx &= (map->map_size - 1);
		entry = TRACING_MAP_ENTRY(map->map, idx);
		test_key = entry->key;

		/* Hash match alone isn't enough: verify the full key.
		 * entry->val may still be NULL if another CPU claimed
		 * the slot but hasn't finished inserting. */
		if (test_key && test_key == key_hash && entry->val &&
		    keys_match(key, entry->val->key, map->key_size)) {
			atomic64_inc(&map->hits);
			return entry->val;
		}

		if (!test_key) {
			if (lookup_only)
				break;

			/* Try to claim the empty slot; on failure another
			 * CPU took it and we re-examine it / keep probing. */
			if (!cmpxchg(&entry->key, 0, key_hash)) {
				struct tracing_map_elt *elt;

				elt = get_free_elt(map);
				if (!elt) {
					/* out of elements: release the slot */
					atomic64_inc(&map->drops);
					entry->key = 0;
					break;
				}

				memcpy(elt->key, key, map->key_size);
				entry->val = elt;
				atomic64_inc(&map->hits);

				return entry->val;
			}
		}

		idx++;
	}

	return NULL;
}
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert (the first map->key_size bytes at @key)
 *
 * Looks up @key in the map; if present, the associated element is
 * returned and the map's hit count is incremented.  Otherwise a free
 * element is claimed from the preallocated pool, bound to a copy of
 * @key, and returned.  When the pool is exhausted the map's drop count
 * is incremented and NULL is returned.
 *
 * Return: the element associated with @key, or NULL on drop.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up (the first map->key_size bytes at @key)
 *
 * Like tracing_map_insert(), but never claims a new element: if @key
 * isn't already present, NULL is returned.
 *
 * Return: the element associated with @key, or NULL if not present.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}
526
527
528
529
530
531
532
533
534
535
536
/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees the map's elements, its entry array, and the map itself.
 * Calling with NULL is a no-op.
 */
void tracing_map_destroy(struct tracing_map *map)
{
	if (!map)
		return;

	tracing_map_free_elts(map);

	tracing_map_array_free(map->map);
	kfree(map);
}
547
548
549
550
551
552
553
554
555
556
557
558
559void tracing_map_clear(struct tracing_map *map)
560{
561 unsigned int i;
562
563 atomic_set(&map->next_elt, -1);
564 atomic64_set(&map->hits, 0);
565 atomic64_set(&map->drops, 0);
566
567 tracing_map_array_clear(map->map);
568
569 for (i = 0; i < map->max_elts; i++)
570 tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
571}
572
/* Install @sort_key as the map's current sort key; the cmp_entries_*
 * comparators read it via elt->map->sort_key while sort() runs. */
static void set_sort_key(struct tracing_map *map,
			 struct tracing_map_sort_key *sort_key)
{
	map->sort_key = *sort_key;
}
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629struct tracing_map *tracing_map_create(unsigned int map_bits,
630 unsigned int key_size,
631 const struct tracing_map_ops *ops,
632 void *private_data)
633{
634 struct tracing_map *map;
635 unsigned int i;
636
637 if (map_bits < TRACING_MAP_BITS_MIN ||
638 map_bits > TRACING_MAP_BITS_MAX)
639 return ERR_PTR(-EINVAL);
640
641 map = kzalloc(sizeof(*map), GFP_KERNEL);
642 if (!map)
643 return ERR_PTR(-ENOMEM);
644
645 map->map_bits = map_bits;
646 map->max_elts = (1 << map_bits);
647 atomic_set(&map->next_elt, -1);
648
649 map->map_size = (1 << (map_bits + 1));
650 map->ops = ops;
651
652 map->private_data = private_data;
653
654 map->map = tracing_map_array_alloc(map->map_size,
655 sizeof(struct tracing_map_entry));
656 if (!map->map)
657 goto free;
658
659 map->key_size = key_size;
660 for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
661 map->key_idx[i] = -1;
662 out:
663 return map;
664 free:
665 tracing_map_destroy(map);
666 map = ERR_PTR(-ENOMEM);
667
668 goto out;
669}
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690int tracing_map_init(struct tracing_map *map)
691{
692 int err;
693
694 if (map->n_fields < 2)
695 return -EINVAL;
696
697 err = tracing_map_alloc_elts(map);
698 if (err)
699 return err;
700
701 tracing_map_clear(map);
702
703 return err;
704}
705
706static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
707 const struct tracing_map_sort_entry **b)
708{
709 int ret = 0;
710
711 if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
712 ret = 1;
713
714 return ret;
715}
716
717static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
718 const struct tracing_map_sort_entry **b)
719{
720 const struct tracing_map_elt *elt_a, *elt_b;
721 struct tracing_map_sort_key *sort_key;
722 struct tracing_map_field *field;
723 tracing_map_cmp_fn_t cmp_fn;
724 void *val_a, *val_b;
725 int ret = 0;
726
727 elt_a = (*a)->elt;
728 elt_b = (*b)->elt;
729
730 sort_key = &elt_a->map->sort_key;
731
732 field = &elt_a->fields[sort_key->field_idx];
733 cmp_fn = field->cmp_fn;
734
735 val_a = &elt_a->fields[sort_key->field_idx].sum;
736 val_b = &elt_b->fields[sort_key->field_idx].sum;
737
738 ret = cmp_fn(val_a, val_b);
739 if (sort_key->descending)
740 ret = -ret;
741
742 return ret;
743}
744
745static int cmp_entries_key(const struct tracing_map_sort_entry **a,
746 const struct tracing_map_sort_entry **b)
747{
748 const struct tracing_map_elt *elt_a, *elt_b;
749 struct tracing_map_sort_key *sort_key;
750 struct tracing_map_field *field;
751 tracing_map_cmp_fn_t cmp_fn;
752 void *val_a, *val_b;
753 int ret = 0;
754
755 elt_a = (*a)->elt;
756 elt_b = (*b)->elt;
757
758 sort_key = &elt_a->map->sort_key;
759
760 field = &elt_a->fields[sort_key->field_idx];
761
762 cmp_fn = field->cmp_fn;
763
764 val_a = elt_a->key + field->offset;
765 val_b = elt_b->key + field->offset;
766
767 ret = cmp_fn(val_a, val_b);
768 if (sort_key->descending)
769 ret = -ret;
770
771 return ret;
772}
773
774static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
775{
776 if (!entry)
777 return;
778
779 if (entry->elt_copied)
780 tracing_map_elt_free(entry->elt);
781
782 kfree(entry);
783}
784
785
786
787
788
789
790
791
/**
 * tracing_map_destroy_sort_entries - Destroy an array of sort entries
 * @entries: The entries to destroy
 * @n_entries: The number of entries in the array
 *
 * Destroy the elements returned by a tracing_map_sort_entries() call.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
				      unsigned int n_entries)
{
	unsigned int i;

	for (i = 0; i < n_entries; i++)
		destroy_sort_entry(entries[i]);

	vfree(entries);
}
802
803static struct tracing_map_sort_entry *
804create_sort_entry(void *key, struct tracing_map_elt *elt)
805{
806 struct tracing_map_sort_entry *sort_entry;
807
808 sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
809 if (!sort_entry)
810 return NULL;
811
812 sort_entry->key = key;
813 sort_entry->elt = elt;
814
815 return sort_entry;
816}
817
818static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
819{
820 struct tracing_map_elt *dup_elt;
821 unsigned int i;
822
823 dup_elt = tracing_map_elt_alloc(elt->map);
824 if (IS_ERR(dup_elt))
825 return NULL;
826
827 if (elt->map->ops && elt->map->ops->elt_copy)
828 elt->map->ops->elt_copy(dup_elt, elt);
829
830 dup_elt->private_data = elt->private_data;
831 memcpy(dup_elt->key, elt->key, elt->map->key_size);
832
833 for (i = 0; i < elt->map->n_fields; i++) {
834 atomic64_set(&dup_elt->fields[i].sum,
835 atomic64_read(&elt->fields[i].sum));
836 dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
837 }
838
839 return dup_elt;
840}
841
842static int merge_dup(struct tracing_map_sort_entry **sort_entries,
843 unsigned int target, unsigned int dup)
844{
845 struct tracing_map_elt *target_elt, *elt;
846 bool first_dup = (target - dup) == 1;
847 int i;
848
849 if (first_dup) {
850 elt = sort_entries[target]->elt;
851 target_elt = copy_elt(elt);
852 if (!target_elt)
853 return -ENOMEM;
854 sort_entries[target]->elt = target_elt;
855 sort_entries[target]->elt_copied = true;
856 } else
857 target_elt = sort_entries[target]->elt;
858
859 elt = sort_entries[dup]->elt;
860
861 for (i = 0; i < elt->map->n_fields; i++)
862 atomic64_add(atomic64_read(&elt->fields[i].sum),
863 &target_elt->fields[i].sum);
864
865 sort_entries[dup]->dup = true;
866
867 return 0;
868}
869
/*
 * Merge entries that share the same key.  The array is first sorted
 * by raw key bytes (cmp_entries_dup) so duplicates become adjacent;
 * each run of duplicates is folded into its first entry via
 * merge_dup(), and the array is then compacted so the surviving
 * entries sit contiguously at the front.
 *
 * Returns the number of duplicates merged away, or a negative error.
 */
static int merge_dups(struct tracing_map_sort_entry **sort_entries,
		      int n_entries, unsigned int key_size)
{
	unsigned int dups = 0, total_dups = 0;
	int err, i, j;
	void *key;

	if (n_entries < 2)
		return total_dups;

	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);

	/* dups counts the current run of duplicates, so i - dups always
	 * indexes the run's first occurrence (the merge target). */
	key = sort_entries[0]->key;
	for (i = 1; i < n_entries; i++) {
		if (!memcmp(sort_entries[i]->key, key, key_size)) {
			dups++; total_dups++;
			err = merge_dup(sort_entries, i - dups, i);
			if (err)
				return err;
			continue;
		}
		key = sort_entries[i]->key;
		dups = 0;
	}

	if (!total_dups)
		return total_dups;

	/* Compact: keep non-dup entries in order, free merged-away ones. */
	for (i = 0, j = 0; i < n_entries; i++) {
		if (!sort_entries[i]->dup) {
			sort_entries[j] = sort_entries[i];
			if (j++ != i)
				sort_entries[i] = NULL;
		} else {
			destroy_sort_entry(sort_entries[i]);
			sort_entries[i] = NULL;
		}
	}

	return total_dups;
}
912
913static bool is_key(struct tracing_map *map, unsigned int field_idx)
914{
915 unsigned int i;
916
917 for (i = 0; i < map->n_keys; i++)
918 if (map->key_idx[i] == field_idx)
919 return true;
920 return false;
921}
922
/*
 * Secondary sort: within runs of entries that compare equal under the
 * primary key, sort by the secondary key.  The map's sort_key is
 * temporarily switched to the secondary key around each sub-sort,
 * since the comparators read it through elt->map->sort_key.
 */
static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const struct tracing_map_sort_entry **,
			  const struct tracing_map_sort_entry **);
	int (*secondary_fn)(const struct tracing_map_sort_entry **,
			    const struct tracing_map_sort_entry **);
	unsigned i, start = 0, n_sub = 1;

	/* Each sort key compares either key bytes or sums, depending on
	 * whether its field was registered as a key. */
	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		/* Extend the current equal-primary run, unless we're at
		 * the end of the array and must flush it now. */
		if (primary_fn(a, b) == 0) {
			n_sub++;
			if (i < n_entries - 2)
				continue;
		}

		/* Runs of fewer than two entries need no sub-sort. */
		if (n_sub < 2) {
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996int tracing_map_sort_entries(struct tracing_map *map,
997 struct tracing_map_sort_key *sort_keys,
998 unsigned int n_sort_keys,
999 struct tracing_map_sort_entry ***sort_entries)
1000{
1001 int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
1002 const struct tracing_map_sort_entry **);
1003 struct tracing_map_sort_entry *sort_entry, **entries;
1004 int i, n_entries, ret;
1005
1006 entries = vmalloc(map->max_elts * sizeof(sort_entry));
1007 if (!entries)
1008 return -ENOMEM;
1009
1010 for (i = 0, n_entries = 0; i < map->map_size; i++) {
1011 struct tracing_map_entry *entry;
1012
1013 entry = TRACING_MAP_ENTRY(map->map, i);
1014
1015 if (!entry->key || !entry->val)
1016 continue;
1017
1018 entries[n_entries] = create_sort_entry(entry->val->key,
1019 entry->val);
1020 if (!entries[n_entries++]) {
1021 ret = -ENOMEM;
1022 goto free;
1023 }
1024 }
1025
1026 if (n_entries == 0) {
1027 ret = 0;
1028 goto free;
1029 }
1030
1031 if (n_entries == 1) {
1032 *sort_entries = entries;
1033 return 1;
1034 }
1035
1036 ret = merge_dups(entries, n_entries, map->key_size);
1037 if (ret < 0)
1038 goto free;
1039 n_entries -= ret;
1040
1041 if (is_key(map, sort_keys[0].field_idx))
1042 cmp_entries_fn = cmp_entries_key;
1043 else
1044 cmp_entries_fn = cmp_entries_sum;
1045
1046 set_sort_key(map, &sort_keys[0]);
1047
1048 sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
1049 (int (*)(const void *, const void *))cmp_entries_fn, NULL);
1050
1051 if (n_sort_keys > 1)
1052 sort_secondary(map,
1053 (const struct tracing_map_sort_entry **)entries,
1054 n_entries,
1055 &sort_keys[0],
1056 &sort_keys[1]);
1057
1058 *sort_entries = entries;
1059
1060 return n_entries;
1061 free:
1062 tracing_map_destroy_sort_entries(entries, n_entries);
1063
1064 return ret;
1065}
1066