// SPDX-License-Identifier: GPL-2.0
/*
 * tracing_map - lock-free map for tracing
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 *
 * tracing_map implementation inspired by lock-free map algorithms
 * originated by Dr. Cliff Click:
 *
 * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable
 *
 * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf
 */
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "tracing_map.h"
#include "trace.h"

/**
 * tracing_map_update_sum - Add a value to a tracing_map_elt's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the given sum associated with the tracing_map_elt
 * @n: The value to add to the sum
 *
 * Add n to sum i associated with the specified tracing_map_elt
 * instance.  The index i is the index returned by the call to
 * tracing_map_add_sum_field() when the tracing map was set up.
 */
void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
        atomic64_add(n, &elt->fields[i].sum);
}

/**
 * tracing_map_read_sum - Return the value of a tracing_map_elt's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the given sum associated with the tracing_map_elt
 *
 * Retrieve the value of the sum i associated with the specified
 * tracing_map_elt instance.  The index i is the index returned by the
 * call to tracing_map_add_sum_field() when the tracing map was set
 * up.
 *
 * Return: The sum associated with field i for elt.
 */
u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
{
        return (u64)atomic64_read(&elt->fields[i].sum);
}

int tracing_map_cmp_string(void *val_a, void *val_b)
{
        char *a = val_a;
        char *b = val_b;

        return strcmp(a, b);
}

int tracing_map_cmp_none(void *val_a, void *val_b)
{
        return 0;
}

static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
{
        u64 a = atomic64_read((atomic64_t *)val_a);
        u64 b = atomic64_read((atomic64_t *)val_b);

        return (a > b) ? 1 : ((a < b) ? -1 : 0);
}

#define DEFINE_TRACING_MAP_CMP_FN(type)                                 \
static int tracing_map_cmp_##type(void *val_a, void *val_b)            \
{                                                                       \
        type a = *(type *)val_a;                                        \
        type b = *(type *)val_b;                                        \
                                                                        \
        return (a > b) ? 1 : ((a < b) ? -1 : 0);                        \
}

DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);

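/*
 * For reference, each DEFINE_TRACING_MAP_CMP_FN() invocation above
 * expands to a comparator for one fixed-size type; e.g. the s64
 * instantiation is equivalent to:
 *
 *      static int tracing_map_cmp_s64(void *val_a, void *val_b)
 *      {
 *              s64 a = *(s64 *)val_a;
 *              s64 b = *(s64 *)val_b;
 *
 *              return (a > b) ? 1 : ((a < b) ? -1 : 0);
 *      }
 */
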
tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
                                         int field_is_signed)
{
        tracing_map_cmp_fn_t fn = tracing_map_cmp_none;

        switch (field_size) {
        case 8:
                if (field_is_signed)
                        fn = tracing_map_cmp_s64;
                else
                        fn = tracing_map_cmp_u64;
                break;
        case 4:
                if (field_is_signed)
                        fn = tracing_map_cmp_s32;
                else
                        fn = tracing_map_cmp_u32;
                break;
        case 2:
                if (field_is_signed)
                        fn = tracing_map_cmp_s16;
                else
                        fn = tracing_map_cmp_u16;
                break;
        case 1:
                if (field_is_signed)
                        fn = tracing_map_cmp_s8;
                else
                        fn = tracing_map_cmp_u8;
                break;
        }

        return fn;
}

static int tracing_map_add_field(struct tracing_map *map,
                                 tracing_map_cmp_fn_t cmp_fn)
{
        int ret = -EINVAL;

        if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
                ret = map->n_fields;
                map->fields[map->n_fields++].cmp_fn = cmp_fn;
        }

        return ret;
}

/**
 * tracing_map_add_sum_field - Add a field describing a tracing_map sum
 * @map: The tracing_map
 *
 * Add a sum field to the map and return the index identifying it in
 * the map and associated tracing_map_elts.  This is the index used
 * for instance to update a sum for a particular tracing_map_elt using
 * tracing_map_update_sum() or read it via tracing_map_read_sum().
 *
 * Return: The index identifying the field in the map and associated
 * tracing_map_elts, or -EINVAL on error.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
        return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}

/**
 * tracing_map_add_key_field - Add a field describing a tracing_map key
 * @map: The tracing_map
 * @offset: The offset within the key
 * @cmp_fn: The comparison function that will be used to sort on the key
 *
 * Let the map know there is a key and that if it's used as a sort key
 * to use cmp_fn.
 *
 * A key can be a subset of a compound key; for that purpose, the
 * offset param is used to describe where within the compound key
 * the key referenced by this key field resides.
 *
 * Return: The index identifying the field in the map and associated
 * tracing_map_elts, or -EINVAL on error.
 */
int tracing_map_add_key_field(struct tracing_map *map,
                              unsigned int offset,
                              tracing_map_cmp_fn_t cmp_fn)
{
        int idx = tracing_map_add_field(map, cmp_fn);

        if (idx < 0)
                return idx;

        map->fields[idx].offset = offset;

        map->key_idx[map->n_keys++] = idx;

        return idx;
}

void tracing_map_array_clear(struct tracing_map_array *a)
{
        unsigned int i;

        if (!a->pages)
                return;

        for (i = 0; i < a->n_pages; i++)
                memset(a->pages[i], 0, PAGE_SIZE);
}

void tracing_map_array_free(struct tracing_map_array *a)
{
        unsigned int i;

        if (!a)
                return;

        if (!a->pages)
                goto free;

        for (i = 0; i < a->n_pages; i++) {
                if (!a->pages[i])
                        break;
                free_page((unsigned long)a->pages[i]);
        }

        /* Also free the page-pointer array and the container itself */
        kfree(a->pages);
 free:
        kfree(a);
}

struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
                                                  unsigned int entry_size)
{
        struct tracing_map_array *a;
        unsigned int i;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return NULL;

        a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
        a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
        a->n_pages = n_elts / a->entries_per_page;
        if (!a->n_pages)
                a->n_pages = 1;
        a->entry_shift = fls(a->entries_per_page) - 1;
        a->entry_mask = (1 << a->entry_shift) - 1;

        a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
        if (!a->pages)
                goto free;

        for (i = 0; i < a->n_pages; i++) {
                a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!a->pages[i])
                        goto free;
        }
 out:
        return a;
 free:
        tracing_map_array_free(a);
        a = NULL;

        goto out;
}

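/*
 * A sketch of how the shifts computed in tracing_map_array_alloc()
 * are used to locate an entry (this mirrors the TRACING_MAP_ARRAY_ELT()
 * lookup in tracing_map.h): entries are padded to a power-of-two size,
 * so an index splits into a page number and a byte offset:
 *
 *      page   = idx >> a->entry_shift;
 *      offset = (idx & a->entry_mask) << a->entry_size_shift;
 *      entry  = a->pages[page] + offset;
 *
 * For example, with a 24-byte entry_size on 4K pages: entries are
 * padded to 32 bytes (entry_size_shift == 5), entries_per_page == 128,
 * entry_shift == 7 and entry_mask == 0x7f.
 */
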
static void tracing_map_elt_clear(struct tracing_map_elt *elt)
{
        unsigned int i;

        for (i = 0; i < elt->map->n_fields; i++)
                if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
                        atomic64_set(&elt->fields[i].sum, 0);

        if (elt->map->ops && elt->map->ops->elt_clear)
                elt->map->ops->elt_clear(elt);
}

static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
{
        unsigned int i;

        tracing_map_elt_clear(elt);

        for (i = 0; i < elt->map->n_fields; i++) {
                elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;

                if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
                        elt->fields[i].offset = elt->map->fields[i].offset;
        }
}

static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
        if (!elt)
                return;

        if (elt->map->ops && elt->map->ops->elt_free)
                elt->map->ops->elt_free(elt);
        kfree(elt->fields);
        kfree(elt->key);
        kfree(elt);
}

static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
{
        struct tracing_map_elt *elt;
        int err = 0;

        elt = kzalloc(sizeof(*elt), GFP_KERNEL);
        if (!elt)
                return ERR_PTR(-ENOMEM);

        elt->map = map;

        elt->key = kzalloc(map->key_size, GFP_KERNEL);
        if (!elt->key) {
                err = -ENOMEM;
                goto free;
        }

        elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
        if (!elt->fields) {
                err = -ENOMEM;
                goto free;
        }

        tracing_map_elt_init_fields(elt);

        if (map->ops && map->ops->elt_alloc) {
                err = map->ops->elt_alloc(elt);
                if (err)
                        goto free;
        }
        return elt;
 free:
        tracing_map_elt_free(elt);

        return ERR_PTR(err);
}

static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
{
        struct tracing_map_elt *elt = NULL;
        int idx;

        /*
         * Cap next_elt at max_elts so that repeated failed allocations
         * can't overflow the counter and index past the element pool.
         */
        idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
        if (idx < map->max_elts) {
                elt = *(TRACING_MAP_ELT(map->elts, idx));
                if (map->ops && map->ops->elt_init)
                        map->ops->elt_init(elt);
        }

        return elt;
}

static void tracing_map_free_elts(struct tracing_map *map)
{
        unsigned int i;

        if (!map->elts)
                return;

        for (i = 0; i < map->max_elts; i++) {
                tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
                *(TRACING_MAP_ELT(map->elts, i)) = NULL;
        }

        tracing_map_array_free(map->elts);
        map->elts = NULL;
}

static int tracing_map_alloc_elts(struct tracing_map *map)
{
        unsigned int i;

        map->elts = tracing_map_array_alloc(map->max_elts,
                                            sizeof(struct tracing_map_elt *));
        if (!map->elts)
                return -ENOMEM;

        for (i = 0; i < map->max_elts; i++) {
                *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
                if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
                        *(TRACING_MAP_ELT(map->elts, i)) = NULL;
                        tracing_map_free_elts(map);

                        return -ENOMEM;
                }
        }

        return 0;
}

static inline bool keys_match(void *key, void *test_key, unsigned int key_size)
{
        bool match = true;

        if (memcmp(key, test_key, key_size))
                match = false;

        return match;
}

static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
        u32 idx, key_hash, test_key;
        struct tracing_map_entry *entry;

        key_hash = jhash(key, map->key_size, 0);
        if (key_hash == 0)
                key_hash = 1;
        idx = key_hash >> (32 - (map->map_bits + 1));

        while (1) {
                idx &= (map->map_size - 1);
                entry = TRACING_MAP_ENTRY(map->map, idx);
                test_key = entry->key;

                if (test_key && test_key == key_hash && entry->val &&
                    keys_match(key, entry->val->key, map->key_size)) {
                        atomic64_inc(&map->hits);
                        return entry->val;
                }

                if (!test_key) {
                        if (lookup_only)
                                break;

                        if (!cmpxchg(&entry->key, 0, key_hash)) {
                                struct tracing_map_elt *elt;

                                elt = get_free_elt(map);
                                if (!elt) {
                                        atomic64_inc(&map->drops);
                                        entry->key = 0;
                                        break;
                                }

                                memcpy(elt->key, key, map->key_size);
                                /*
                                 * Make sure the elt is fully initialized
                                 * before publishing it to concurrent
                                 * readers checking entry->val.
                                 */
                                smp_wmb();
                                WRITE_ONCE(entry->val, elt);
                                atomic64_inc(&map->hits);

                                return entry->val;
                        }
                }

                idx++;
        }

        return NULL;
}

/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert
 *
 * Inserts a key into a tracing_map and creates and returns a new
 * tracing_map_elt for it, or if the key has already been inserted by
 * a previous call, returns the tracing_map_elt already associated
 * with it.  When the map was created, the number of elements to be
 * allocated for the map was specified (internally maintained as
 * 'max_elts' in struct tracing_map), and that number of
 * tracing_map_elts was created by tracing_map_init().  This is the
 * pre-allocated pool of tracing_map_elts that tracing_map_insert()
 * will allocate from when adding new keys.  Once that pool is
 * exhausted, tracing_map_insert() is useless and will return NULL to
 * signal that state.  There are two user-visible tracing_map
 * variables, 'hits' and 'drops', which are updated by this function.
 * Every time an element is either successfully inserted or retrieved,
 * the 'hits' value is incremented.  Every time an element insertion
 * fails, the 'drops' value is incremented.
 *
 * This is a lock-free tracing map insertion function implementing a
 * modified form of Cliff Click's basic insertion algorithm.  It
 * requires the table size be a power of two.  To prevent any
 * possibility of an infinite loop we always make the internal table
 * size double the size of the requested table size (map_size * 2).
 * Likewise, we never reuse a slot or resize or delete elements - when
 * we've reached map_size entries, we simply return NULL once we've
 * run out of entries.  Readers can at any point in time traverse the
 * tracing map and safely access the key/val pairs.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If this was a newly inserted key, the val will be a newly allocated
 * and associated tracing_map_elt pointer val.  If the key wasn't
 * found and the pool of tracing_map_elts has been exhausted, NULL is
 * returned and no further insertions will succeed.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
        return __tracing_map_insert(map, key, false);
}

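/*
 * Illustrative call pattern (the names here are hypothetical): a
 * handler typically inserts the event's key and bumps one of the
 * element's sums using the field index obtained at map-setup time:
 *
 *      elt = tracing_map_insert(map, key);
 *      if (elt)
 *              tracing_map_update_sum(elt, sum_idx, val);
 *
 * A NULL return means the pre-allocated element pool was exhausted
 * and the update was dropped (accounted for in the map's 'drops').
 */
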
/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up
 *
 * Looks up key in tracing_map and if found returns the matching
 * tracing_map_elt.  This is a lock-free lookup; see
 * tracing_map_insert() for details on tracing_map and how it works.
 * Every time an element is retrieved, the 'hits' value is
 * incremented.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If the key wasn't found, NULL is returned.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
        return __tracing_map_insert(map, key, true);
}

/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees a tracing_map along with its associated array of
 * tracing_map_elts.
 *
 * Callers should make sure there are no readers or writers actively
 * reading or inserting into the map before calling this.
 */
void tracing_map_destroy(struct tracing_map *map)
{
        if (!map)
                return;

        tracing_map_free_elts(map);

        tracing_map_array_free(map->map);
        kfree(map);
}

/**
 * tracing_map_clear - Clear a tracing_map
 * @map: The tracing_map to clear
 *
 * Resets the tracing map to a cleared or initial state.  The
 * tracing_map_elts are all cleared, and the array of struct
 * tracing_map_entry is reset to an initialized state.
 *
 * Callers should make sure there are no writers actively inserting
 * into the map before calling this.
 */
void tracing_map_clear(struct tracing_map *map)
{
        unsigned int i;

        /* next_elt counts allocated elements; see get_free_elt() */
        atomic_set(&map->next_elt, 0);
        atomic64_set(&map->hits, 0);
        atomic64_set(&map->drops, 0);

        tracing_map_array_clear(map->map);

        for (i = 0; i < map->max_elts; i++)
                tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}

static void set_sort_key(struct tracing_map *map,
                         struct tracing_map_sort_key *sort_key)
{
        map->sort_key = *sort_key;
}

/**
 * tracing_map_create - Create a lock-free map and element pool
 * @map_bits: The size of the map (2 ** map_bits)
 * @key_size: The size of the key for the map in bytes
 * @ops: Optional client-defined tracing_map_ops instance
 * @private_data: Client data associated with the map
 *
 * Creates and sets up a map to contain 2 ** map_bits number of
 * elements (internally maintained as 'max_elts' in struct
 * tracing_map).  Before using, map fields should be added to the map
 * with tracing_map_add_sum_field() and tracing_map_add_key_field().
 * tracing_map_init() should then be called to allocate the array of
 * tracing_map_elts, in order to avoid allocating anything in the map
 * insertion path.  The user-specified map size reflects the maximum
 * number of elements that can be contained in the table requested by
 * the user - internally we double that in order to keep the table
 * sparse and keep collisions manageable.
 *
 * A tracing_map is a special-purpose map designed to aggregate or
 * 'sum' one or more values associated with a specific object of type
 * tracing_map_elt, which is attached by the map to a given key.
 *
 * See tracing_map.h for a description of tracing_map_ops.
 *
 * Return: the tracing_map pointer if successful, ERR_PTR if not.
 */
struct tracing_map *tracing_map_create(unsigned int map_bits,
                                       unsigned int key_size,
                                       const struct tracing_map_ops *ops,
                                       void *private_data)
{
        struct tracing_map *map;
        unsigned int i;

        if (map_bits < TRACING_MAP_BITS_MIN ||
            map_bits > TRACING_MAP_BITS_MAX)
                return ERR_PTR(-EINVAL);

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return ERR_PTR(-ENOMEM);

        map->map_bits = map_bits;
        map->max_elts = (1 << map_bits);
        atomic_set(&map->next_elt, 0);

        map->map_size = (1 << (map_bits + 1));
        map->ops = ops;

        map->private_data = private_data;

        map->map = tracing_map_array_alloc(map->map_size,
                                           sizeof(struct tracing_map_entry));
        if (!map->map)
                goto free;

        map->key_size = key_size;
        for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
                map->key_idx[i] = -1;
 out:
        return map;
 free:
        tracing_map_destroy(map);
        map = ERR_PTR(-ENOMEM);

        goto out;
}

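/*
 * Illustrative setup sequence (variable names here are hypothetical):
 * a client creates the map, declares its key and sum fields, and only
 * then initializes the element pool:
 *
 *      map = tracing_map_create(bits, key_size, &my_ops, my_data);
 *      if (IS_ERR(map))
 *              return PTR_ERR(map);
 *
 *      key_idx = tracing_map_add_key_field(map, 0, tracing_map_cmp_string);
 *      sum_idx = tracing_map_add_sum_field(map);
 *
 *      err = tracing_map_init(map);
 *
 * tracing_map_init() requires at least two fields (e.g. one key and
 * one sum) to have been added, and allocates the element pool so the
 * insertion path never allocates.
 */
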
/**
 * tracing_map_init - Allocate and clear a map's tracing_map_elts
 * @map: The tracing_map to initialize
 *
 * Allocates and clears a pool of tracing_map_elts equal to the
 * user-specified size of max_elts (internally maintained as
 * 'max_elts' in struct tracing_map).  Before using, the map fields
 * should be added to the map with tracing_map_add_sum_field() and
 * tracing_map_add_key_field().  tracing_map_init() should then be
 * called to allocate the array of tracing_map_elts, in order to avoid
 * allocating anything in the map insertion path.
 *
 * See tracing_map.h for a description of tracing_map_ops.
 *
 * Return: 0 if successful, a negative error code if not.
 */
int tracing_map_init(struct tracing_map *map)
{
        int err;

        if (map->n_fields < 2)
                return -EINVAL; /* need at least one key and one val */

        err = tracing_map_alloc_elts(map);
        if (err)
                return err;

        tracing_map_clear(map);

        return err;
}

/*
 * sort() requires a symmetric comparison so that equal keys end up
 * adjacent; returning 1 for any mismatch regardless of order would
 * violate that, so return the memcmp() result directly.
 */
static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
                           const struct tracing_map_sort_entry **b)
{
        return memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size);
}

713
714static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
715 const struct tracing_map_sort_entry **b)
716{
717 const struct tracing_map_elt *elt_a, *elt_b;
718 struct tracing_map_sort_key *sort_key;
719 struct tracing_map_field *field;
720 tracing_map_cmp_fn_t cmp_fn;
721 void *val_a, *val_b;
722 int ret = 0;
723
724 elt_a = (*a)->elt;
725 elt_b = (*b)->elt;
726
727 sort_key = &elt_a->map->sort_key;
728
729 field = &elt_a->fields[sort_key->field_idx];
730 cmp_fn = field->cmp_fn;
731
732 val_a = &elt_a->fields[sort_key->field_idx].sum;
733 val_b = &elt_b->fields[sort_key->field_idx].sum;
734
735 ret = cmp_fn(val_a, val_b);
736 if (sort_key->descending)
737 ret = -ret;
738
739 return ret;
740}
741
742static int cmp_entries_key(const struct tracing_map_sort_entry **a,
743 const struct tracing_map_sort_entry **b)
744{
745 const struct tracing_map_elt *elt_a, *elt_b;
746 struct tracing_map_sort_key *sort_key;
747 struct tracing_map_field *field;
748 tracing_map_cmp_fn_t cmp_fn;
749 void *val_a, *val_b;
750 int ret = 0;
751
752 elt_a = (*a)->elt;
753 elt_b = (*b)->elt;
754
755 sort_key = &elt_a->map->sort_key;
756
757 field = &elt_a->fields[sort_key->field_idx];
758
759 cmp_fn = field->cmp_fn;
760
761 val_a = elt_a->key + field->offset;
762 val_b = elt_b->key + field->offset;
763
764 ret = cmp_fn(val_a, val_b);
765 if (sort_key->descending)
766 ret = -ret;
767
768 return ret;
769}
770
771static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
772{
773 if (!entry)
774 return;
775
776 if (entry->elt_copied)
777 tracing_map_elt_free(entry->elt);
778
779 kfree(entry);
780}
781
/**
 * tracing_map_destroy_sort_entries - Destroy an array of sort entries
 * @entries: The entries to destroy
 * @n_entries: The number of entries in the array
 *
 * Destroy the elements returned by a tracing_map_sort_entries() call.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
                                      unsigned int n_entries)
{
        unsigned int i;

        for (i = 0; i < n_entries; i++)
                destroy_sort_entry(entries[i]);

        vfree(entries);
}

static struct tracing_map_sort_entry *
create_sort_entry(void *key, struct tracing_map_elt *elt)
{
        struct tracing_map_sort_entry *sort_entry;

        sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
        if (!sort_entry)
                return NULL;

        sort_entry->key = key;
        sort_entry->elt = elt;

        return sort_entry;
}

static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
{
        struct tracing_map_elt *dup_elt;
        unsigned int i;

        dup_elt = tracing_map_elt_alloc(elt->map);
        if (IS_ERR(dup_elt))
                return NULL;

        if (elt->map->ops && elt->map->ops->elt_copy)
                elt->map->ops->elt_copy(dup_elt, elt);

        dup_elt->private_data = elt->private_data;
        memcpy(dup_elt->key, elt->key, elt->map->key_size);

        for (i = 0; i < elt->map->n_fields; i++) {
                atomic64_set(&dup_elt->fields[i].sum,
                             atomic64_read(&elt->fields[i].sum));
                dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
        }

        return dup_elt;
}

static int merge_dup(struct tracing_map_sort_entry **sort_entries,
                     unsigned int target, unsigned int dup)
{
        struct tracing_map_elt *target_elt, *elt;
        bool first_dup = (dup - target) == 1;
        int i;

        if (first_dup) {
                /*
                 * On the first duplicate, copy the target elt so the
                 * merged sums don't modify the live map element.
                 * Note dup is always greater than target, so the
                 * adjacency test must be (dup - target) == 1.
                 */
                elt = sort_entries[target]->elt;
                target_elt = copy_elt(elt);
                if (!target_elt)
                        return -ENOMEM;
                sort_entries[target]->elt = target_elt;
                sort_entries[target]->elt_copied = true;
        } else
                target_elt = sort_entries[target]->elt;

        elt = sort_entries[dup]->elt;

        for (i = 0; i < elt->map->n_fields; i++)
                atomic64_add(atomic64_read(&elt->fields[i].sum),
                             &target_elt->fields[i].sum);

        sort_entries[dup]->dup = true;

        return 0;
}

static int merge_dups(struct tracing_map_sort_entry **sort_entries,
                      int n_entries, unsigned int key_size)
{
        unsigned int dups = 0, total_dups = 0;
        int err, i, j;
        void *key;

        if (n_entries < 2)
                return total_dups;

        sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
             (int (*)(const void *, const void *))cmp_entries_dup, NULL);

        key = sort_entries[0]->key;
        for (i = 1; i < n_entries; i++) {
                if (!memcmp(sort_entries[i]->key, key, key_size)) {
                        dups++;
                        total_dups++;
                        err = merge_dup(sort_entries, i - dups, i);
                        if (err)
                                return err;
                        continue;
                }
                key = sort_entries[i]->key;
                dups = 0;
        }

        if (!total_dups)
                return total_dups;

        for (i = 0, j = 0; i < n_entries; i++) {
                if (!sort_entries[i]->dup) {
                        sort_entries[j] = sort_entries[i];
                        if (j++ != i)
                                sort_entries[i] = NULL;
                } else {
                        destroy_sort_entry(sort_entries[i]);
                        sort_entries[i] = NULL;
                }
        }

        return total_dups;
}

static bool is_key(struct tracing_map *map, unsigned int field_idx)
{
        unsigned int i;

        for (i = 0; i < map->n_keys; i++)
                if (map->key_idx[i] == field_idx)
                        return true;
        return false;
}

static void sort_secondary(struct tracing_map *map,
                           const struct tracing_map_sort_entry **entries,
                           unsigned int n_entries,
                           struct tracing_map_sort_key *primary_key,
                           struct tracing_map_sort_key *secondary_key)
{
        int (*primary_fn)(const struct tracing_map_sort_entry **,
                          const struct tracing_map_sort_entry **);
        int (*secondary_fn)(const struct tracing_map_sort_entry **,
                            const struct tracing_map_sort_entry **);
        unsigned int i, start = 0, n_sub = 1;

        if (is_key(map, primary_key->field_idx))
                primary_fn = cmp_entries_key;
        else
                primary_fn = cmp_entries_sum;

        if (is_key(map, secondary_key->field_idx))
                secondary_fn = cmp_entries_key;
        else
                secondary_fn = cmp_entries_sum;

        for (i = 0; i < n_entries - 1; i++) {
                const struct tracing_map_sort_entry **a = &entries[i];
                const struct tracing_map_sort_entry **b = &entries[i + 1];

                if (primary_fn(a, b) == 0) {
                        n_sub++;
                        if (i < n_entries - 2)
                                continue;
                }

                if (n_sub < 2) {
                        start = i + 1;
                        n_sub = 1;
                        continue;
                }

                set_sort_key(map, secondary_key);
                sort(&entries[start], n_sub,
                     sizeof(struct tracing_map_sort_entry *),
                     (int (*)(const void *, const void *))secondary_fn, NULL);
                set_sort_key(map, primary_key);

                start = i + 1;
                n_sub = 1;
        }
}

/**
 * tracing_map_sort_entries - Sort the current map entries
 * @map: The tracing_map
 * @sort_keys: The sort keys to use for sorting
 * @n_sort_keys: The number of sort keys (1 or 2)
 * @sort_entries: outval: pointer to allocated and sorted array of entries
 *
 * tracing_map_sort_entries() sorts the current set of entries in the
 * map and returns the list of tracing_map_sort_entries containing
 * them to the client.  The client can access the struct
 * tracing_map_elt element of interest directly as the 'elt' field of
 * a returned struct tracing_map_sort_entry object.
 *
 * The sort_key has only two fields: idx and descending.  'idx' refers
 * to the index of the field added via tracing_map_add_sum_field() or
 * tracing_map_add_key_field() when the tracing_map was initialized.
 * 'descending' is a flag that if set reverses the sort order, which
 * by default is ascending.
 *
 * The client should not hold on to the returned array but should use
 * it and call tracing_map_destroy_sort_entries() when done.
 *
 * Return: the number of sort_entries in the struct
 * tracing_map_sort_entry array, negative on error.
 */
int tracing_map_sort_entries(struct tracing_map *map,
                             struct tracing_map_sort_key *sort_keys,
                             unsigned int n_sort_keys,
                             struct tracing_map_sort_entry ***sort_entries)
{
        int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
                              const struct tracing_map_sort_entry **);
        struct tracing_map_sort_entry *sort_entry, **entries;
        int i, n_entries, ret;

        entries = vmalloc(map->max_elts * sizeof(sort_entry));
        if (!entries)
                return -ENOMEM;

        for (i = 0, n_entries = 0; i < map->map_size; i++) {
                struct tracing_map_entry *entry;

                entry = TRACING_MAP_ENTRY(map->map, i);

                if (!entry->key || !entry->val)
                        continue;

                entries[n_entries] = create_sort_entry(entry->val->key,
                                                       entry->val);
                if (!entries[n_entries++]) {
                        ret = -ENOMEM;
                        goto free;
                }
        }

        if (n_entries == 0) {
                ret = 0;
                goto free;
        }

        if (n_entries == 1) {
                *sort_entries = entries;
                return 1;
        }

        ret = merge_dups(entries, n_entries, map->key_size);
        if (ret < 0)
                goto free;
        n_entries -= ret;

        if (is_key(map, sort_keys[0].field_idx))
                cmp_entries_fn = cmp_entries_key;
        else
                cmp_entries_fn = cmp_entries_sum;

        set_sort_key(map, &sort_keys[0]);

        sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
             (int (*)(const void *, const void *))cmp_entries_fn, NULL);

        if (n_sort_keys > 1)
                sort_secondary(map,
                               (const struct tracing_map_sort_entry **)entries,
                               n_entries,
                               &sort_keys[0],
                               &sort_keys[1]);

        *sort_entries = entries;

        return n_entries;
 free:
        tracing_map_destroy_sort_entries(entries, n_entries);

        return ret;
}

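/*
 * Illustrative use of the sort API above (names are hypothetical):
 *
 *      struct tracing_map_sort_entry **entries;
 *      int i, n;
 *
 *      n = tracing_map_sort_entries(map, sort_keys, n_sort_keys, &entries);
 *      for (i = 0; i < n; i++)
 *              print_elt(entries[i]->elt);
 *      if (n > 0)
 *              tracing_map_destroy_sort_entries(entries, n);
 *
 * On error (n < 0) or an empty map (n == 0), the entries array has
 * already been freed internally and must not be touched.
 */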