1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/vmalloc.h>
24#include <linux/jhash.h>
25#include <linux/slab.h>
26#include <linux/sort.h>
27
28#include "tracing_map.h"
29#include "trace.h"
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
48{
49 atomic64_add(n, &elt->fields[i].sum);
50}
51
52
53
54
55
56
57
58
59
60
61
62
63
64u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
65{
66 return (u64)atomic64_read(&elt->fields[i].sum);
67}
68
69
70
71
72
73
74
75
76
77
78
/**
 * tracing_map_set_var - Assign a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 * @n: The value to assign
 *
 * Assign n to variable i associated with the specified tracing_map_elt
 * instance.  The index i is the index returned by the call to
 * tracing_map_add_var() when the tracing map was set up.  The variable
 * is also flagged as set, for tracing_map_var_set() to report.
 */
void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
	atomic64_set(&elt->vars[i], n);
	elt->var_set[i] = true;
}
84
85
86
87
88
89
90
91
92
93
/**
 * tracing_map_var_set - Return whether or not a variable has been set
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Return: true if the variable has been set via tracing_map_set_var()
 * and not since cleared (by tracing_map_read_var_once() or an element
 * clear), false otherwise.
 */
bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)
{
	return elt->var_set[i];
}
98
99
100
101
102
103
104
105
106
107
108
109
110
/**
 * tracing_map_read_var - Return the value of a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Return: The variable value associated with field i for elt.  The
 * "set" state of the variable is left untouched.
 */
u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)
{
	return (u64)atomic64_read(&elt->vars[i]);
}
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/**
 * tracing_map_read_var_once - Return and reset a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Return: The variable value associated with field i for elt.  Unlike
 * tracing_map_read_var(), the variable's "set" flag is cleared, so a
 * subsequent tracing_map_var_set() returns false until the variable is
 * assigned again.  Note the flag is cleared before the value is read.
 */
u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i)
{
	elt->var_set[i] = false;
	return (u64)atomic64_read(&elt->vars[i]);
}
135
/*
 * Three-way comparator for string key fields: a strcmp() wrapper with
 * the void * signature the tracing_map sort code expects.
 */
int tracing_map_cmp_string(void *val_a, void *val_b)
{
	return strcmp((char *)val_a, (char *)val_b);
}
143
/*
 * No-op comparator: reports any two values as equal.  Used as the
 * fallback when a field has no meaningful ordering (see
 * tracing_map_cmp_num() for unhandled sizes).
 */
int tracing_map_cmp_none(void *val_a, void *val_b)
{
	return 0;
}
148
/*
 * Three-way comparator for sum fields: compares the current atomic64
 * values as unsigned 64-bit quantities.  Returns 1, -1, or 0 for
 * a > b, a < b, a == b respectively.
 */
static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
{
	u64 a = atomic64_read((atomic64_t *)val_a);
	u64 b = atomic64_read((atomic64_t *)val_b);

	return (a > b) ? 1 : ((a < b) ? -1 : 0);
}
156
/*
 * Generate a typed three-way comparator (returns 1, -1, or 0) for each
 * basic integer type a numeric key field can have.  The generated
 * functions are selected by size/signedness in tracing_map_cmp_num().
 */
#define DEFINE_TRACING_MAP_CMP_FN(type) \
static int tracing_map_cmp_##type(void *val_a, void *val_b) \
{ \
 type a = *(type *)val_a; \
 type b = *(type *)val_b; \
 \
 return (a > b) ? 1 : ((a < b) ? -1 : 0); \
}

DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);
174
175tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
176 int field_is_signed)
177{
178 tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
179
180 switch (field_size) {
181 case 8:
182 if (field_is_signed)
183 fn = tracing_map_cmp_s64;
184 else
185 fn = tracing_map_cmp_u64;
186 break;
187 case 4:
188 if (field_is_signed)
189 fn = tracing_map_cmp_s32;
190 else
191 fn = tracing_map_cmp_u32;
192 break;
193 case 2:
194 if (field_is_signed)
195 fn = tracing_map_cmp_s16;
196 else
197 fn = tracing_map_cmp_u16;
198 break;
199 case 1:
200 if (field_is_signed)
201 fn = tracing_map_cmp_s8;
202 else
203 fn = tracing_map_cmp_u8;
204 break;
205 }
206
207 return fn;
208}
209
210static int tracing_map_add_field(struct tracing_map *map,
211 tracing_map_cmp_fn_t cmp_fn)
212{
213 int ret = -EINVAL;
214
215 if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
216 ret = map->n_fields;
217 map->fields[map->n_fields++].cmp_fn = cmp_fn;
218 }
219
220 return ret;
221}
222
223
224
225
226
227
228
229
230
231
232
233
234
/**
 * tracing_map_add_sum_field - Add a field describing a tracing_map sum
 * @map: The tracing_map
 *
 * Add a sum field to the map and return the index identifying it in
 * the map and associated tracing_map_elts.  This is the index used for
 * tracing_map_update_sum() and tracing_map_read_sum().
 *
 * Return: The index identifying the field, or -EINVAL if no field
 * slots remain.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
239
240
241
242
243
244
245
246
247
248
249
250
251
252int tracing_map_add_var(struct tracing_map *map)
253{
254 int ret = -EINVAL;
255
256 if (map->n_vars < TRACING_MAP_VARS_MAX)
257 ret = map->n_vars++;
258
259 return ret;
260}
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
/**
 * tracing_map_add_key_field - Add a field describing a tracing_map key
 * @map: The tracing_map
 * @offset: The offset within the key
 * @cmp_fn: The comparison function that will be used to sort on the key
 *
 * Add a key field to the map and return the index identifying it in
 * the map and associated tracing_map_elts.  The field's offset into
 * the composite key is recorded, and the field index is appended to
 * map->key_idx so is_key() can distinguish key fields from sum fields.
 *
 * Return: The index identifying the field, or -EINVAL if no field
 * slots remain.
 */
int tracing_map_add_key_field(struct tracing_map *map,
			      unsigned int offset,
			      tracing_map_cmp_fn_t cmp_fn)

{
	int idx = tracing_map_add_field(map, cmp_fn);

	if (idx < 0)
		return idx;

	map->fields[idx].offset = offset;

	map->key_idx[map->n_keys++] = idx;

	return idx;
}
294
/*
 * Zero every page backing the array.  A no-op if the page pointer
 * array was never allocated.
 */
void tracing_map_array_clear(struct tracing_map_array *a)
{
	unsigned int i;

	if (!a->pages)
		return;

	for (i = 0; i < a->n_pages; i++)
		memset(a->pages[i], 0, PAGE_SIZE);
}
305
306void tracing_map_array_free(struct tracing_map_array *a)
307{
308 unsigned int i;
309
310 if (!a)
311 return;
312
313 if (!a->pages)
314 goto free;
315
316 for (i = 0; i < a->n_pages; i++) {
317 if (!a->pages[i])
318 break;
319 free_page((unsigned long)a->pages[i]);
320 }
321
322 kfree(a->pages);
323
324 free:
325 kfree(a);
326}
327
328struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
329 unsigned int entry_size)
330{
331 struct tracing_map_array *a;
332 unsigned int i;
333
334 a = kzalloc(sizeof(*a), GFP_KERNEL);
335 if (!a)
336 return NULL;
337
338 a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
339 a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
340 a->n_pages = n_elts / a->entries_per_page;
341 if (!a->n_pages)
342 a->n_pages = 1;
343 a->entry_shift = fls(a->entries_per_page) - 1;
344 a->entry_mask = (1 << a->entry_shift) - 1;
345
346 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
347 if (!a->pages)
348 goto free;
349
350 for (i = 0; i < a->n_pages; i++) {
351 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
352 if (!a->pages[i])
353 goto free;
354 }
355 out:
356 return a;
357 free:
358 tracing_map_array_free(a);
359 a = NULL;
360
361 goto out;
362}
363
/*
 * Reset an element's accumulated state: zero every sum field, zero and
 * un-set every variable, then give the client a chance to clear its
 * private per-element data via ops->elt_clear.
 */
static void tracing_map_elt_clear(struct tracing_map_elt *elt)
{
	unsigned i;

	for (i = 0; i < elt->map->n_fields; i++)
		/* only sum fields hold an atomic64; key fields hold offsets */
		if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
			atomic64_set(&elt->fields[i].sum, 0);

	for (i = 0; i < elt->map->n_vars; i++) {
		atomic64_set(&elt->vars[i], 0);
		elt->var_set[i] = false;
	}

	if (elt->map->ops && elt->map->ops->elt_clear)
		elt->map->ops->elt_clear(elt);
}
380
/*
 * Initialize a freshly allocated element: clear it, then copy each
 * field's comparator (and, for key fields, its key offset) from the
 * map's field descriptions into the element's own field array.
 */
static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
{
	unsigned int i;

	tracing_map_elt_clear(elt);

	for (i = 0; i < elt->map->n_fields; i++) {
		elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;

		/* sum fields use the atomic64 member instead of an offset */
		if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
			elt->fields[i].offset = elt->map->fields[i].offset;
	}
}
394
/*
 * Free an element and everything hanging off it.  The client's
 * ops->elt_free hook runs first, while the element is still intact.
 * Safe to call on NULL and on a partially constructed element (kfree
 * of NULL members is a no-op).
 */
static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
	if (!elt)
		return;

	if (elt->map->ops && elt->map->ops->elt_free)
		elt->map->ops->elt_free(elt);
	kfree(elt->fields);
	kfree(elt->vars);
	kfree(elt->var_set);
	kfree(elt->key);
	kfree(elt);
}
408
/*
 * Allocate one element plus its key buffer, field array, and variable
 * arrays, initialize the fields from the map description, then let the
 * client attach its private data via ops->elt_alloc.
 *
 * Return: the new element, or an ERR_PTR() (-ENOMEM or the
 * ops->elt_alloc error).  Partial allocations are undone by
 * tracing_map_elt_free() on the error path.
 */
static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
{
	struct tracing_map_elt *elt;
	int err = 0;

	elt = kzalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return ERR_PTR(-ENOMEM);

	elt->map = map;

	elt->key = kzalloc(map->key_size, GFP_KERNEL);
	if (!elt->key) {
		err = -ENOMEM;
		goto free;
	}

	elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
	if (!elt->fields) {
		err = -ENOMEM;
		goto free;
	}

	elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL);
	if (!elt->vars) {
		err = -ENOMEM;
		goto free;
	}

	elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL);
	if (!elt->var_set) {
		err = -ENOMEM;
		goto free;
	}

	tracing_map_elt_init_fields(elt);

	if (map->ops && map->ops->elt_alloc) {
		err = map->ops->elt_alloc(elt);
		if (err)
			goto free;
	}
	return elt;
 free:
	tracing_map_elt_free(elt);

	return ERR_PTR(err);
}
457
458static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
459{
460 struct tracing_map_elt *elt = NULL;
461 int idx;
462
463 idx = atomic_inc_return(&map->next_elt);
464 if (idx < map->max_elts) {
465 elt = *(TRACING_MAP_ELT(map->elts, idx));
466 if (map->ops && map->ops->elt_init)
467 map->ops->elt_init(elt);
468 }
469
470 return elt;
471}
472
/*
 * Free every element in the pool, then the pool array itself.  Each
 * slot is NULLed after freeing so the array is never left holding
 * dangling pointers.  A no-op if the pool was never allocated.
 */
static void tracing_map_free_elts(struct tracing_map *map)
{
	unsigned int i;

	if (!map->elts)
		return;

	for (i = 0; i < map->max_elts; i++) {
		tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
		*(TRACING_MAP_ELT(map->elts, i)) = NULL;
	}

	tracing_map_array_free(map->elts);
	map->elts = NULL;
}
488
489static int tracing_map_alloc_elts(struct tracing_map *map)
490{
491 unsigned int i;
492
493 map->elts = tracing_map_array_alloc(map->max_elts,
494 sizeof(struct tracing_map_elt *));
495 if (!map->elts)
496 return -ENOMEM;
497
498 for (i = 0; i < map->max_elts; i++) {
499 *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
500 if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
501 *(TRACING_MAP_ELT(map->elts, i)) = NULL;
502 tracing_map_free_elts(map);
503
504 return -ENOMEM;
505 }
506 }
507
508 return 0;
509}
510
/* Byte-wise key equality over key_size bytes. */
static inline bool keys_match(void *key, void *test_key, unsigned key_size)
{
	return memcmp(key, test_key, key_size) == 0;
}
520
521static inline struct tracing_map_elt *
522__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
523{
524 u32 idx, key_hash, test_key;
525 int dup_try = 0;
526 struct tracing_map_entry *entry;
527 struct tracing_map_elt *val;
528
529 key_hash = jhash(key, map->key_size, 0);
530 if (key_hash == 0)
531 key_hash = 1;
532 idx = key_hash >> (32 - (map->map_bits + 1));
533
534 while (1) {
535 idx &= (map->map_size - 1);
536 entry = TRACING_MAP_ENTRY(map->map, idx);
537 test_key = entry->key;
538
539 if (test_key && test_key == key_hash) {
540 val = READ_ONCE(entry->val);
541 if (val &&
542 keys_match(key, val->key, map->key_size)) {
543 if (!lookup_only)
544 atomic64_inc(&map->hits);
545 return val;
546 } else if (unlikely(!val)) {
547
548
549
550
551
552
553
554
555
556
557
558
559 dup_try++;
560 if (dup_try > map->map_size) {
561 atomic64_inc(&map->drops);
562 break;
563 }
564 continue;
565 }
566 }
567
568 if (!test_key) {
569 if (lookup_only)
570 break;
571
572 if (!cmpxchg(&entry->key, 0, key_hash)) {
573 struct tracing_map_elt *elt;
574
575 elt = get_free_elt(map);
576 if (!elt) {
577 atomic64_inc(&map->drops);
578 entry->key = 0;
579 break;
580 }
581
582 memcpy(elt->key, key, map->key_size);
583 entry->val = elt;
584 atomic64_inc(&map->hits);
585
586 return entry->val;
587 } else {
588
589
590
591
592 dup_try++;
593 continue;
594 }
595 }
596
597 idx++;
598 }
599
600 return NULL;
601}
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert
 *
 * Inserts @key into @map and returns the associated element, or simply
 * returns the existing element if @key is already present.  Safe to
 * call from tracing context; failures (map full, abandoned insert)
 * are counted in map->drops and reported as NULL.
 *
 * Return: the tracing_map_elt for @key, or NULL if dropped.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up
 *
 * Looks up @key without inserting it and without updating the hit
 * count.
 *
 * Return: the tracing_map_elt for @key, or NULL if not present.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}
666
667
668
669
670
671
672
673
674
675
676
/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees the element pool, the hash array, and the map itself.  Safe to
 * call on NULL and on a partially constructed map.  The caller is
 * responsible for ensuring the map is no longer in use.
 */
void tracing_map_destroy(struct tracing_map *map)
{
	if (!map)
		return;

	tracing_map_free_elts(map);

	tracing_map_array_free(map->map);
	kfree(map);
}
687
688
689
690
691
692
693
694
695
696
697
698
/**
 * tracing_map_clear - Clear a tracing_map
 * @map: The tracing_map to clear
 *
 * Resets the map to its initial state: rewinds the free-element cursor
 * (next_elt back to -1), zeroes the hit/drop counts, empties the hash
 * array, and clears every pooled element's sums and variables.  The
 * caller is responsible for ensuring no insertions run concurrently.
 */
void tracing_map_clear(struct tracing_map *map)
{
	unsigned int i;

	atomic_set(&map->next_elt, -1);
	atomic64_set(&map->hits, 0);
	atomic64_set(&map->drops, 0);

	tracing_map_array_clear(map->map);

	for (i = 0; i < map->max_elts; i++)
		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}
712
/*
 * Install the sort key the cmp_entries_sum()/cmp_entries_key()
 * comparators will read (they take it from elt->map->sort_key rather
 * than as an argument, since sort() gives them no context pointer).
 */
static void set_sort_key(struct tracing_map *map,
			 struct tracing_map_sort_key *sort_key)
{
	map->sort_key = *sort_key;
}
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769struct tracing_map *tracing_map_create(unsigned int map_bits,
770 unsigned int key_size,
771 const struct tracing_map_ops *ops,
772 void *private_data)
773{
774 struct tracing_map *map;
775 unsigned int i;
776
777 if (map_bits < TRACING_MAP_BITS_MIN ||
778 map_bits > TRACING_MAP_BITS_MAX)
779 return ERR_PTR(-EINVAL);
780
781 map = kzalloc(sizeof(*map), GFP_KERNEL);
782 if (!map)
783 return ERR_PTR(-ENOMEM);
784
785 map->map_bits = map_bits;
786 map->max_elts = (1 << map_bits);
787 atomic_set(&map->next_elt, -1);
788
789 map->map_size = (1 << (map_bits + 1));
790 map->ops = ops;
791
792 map->private_data = private_data;
793
794 map->map = tracing_map_array_alloc(map->map_size,
795 sizeof(struct tracing_map_entry));
796 if (!map->map)
797 goto free;
798
799 map->key_size = key_size;
800 for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
801 map->key_idx[i] = -1;
802 out:
803 return map;
804 free:
805 tracing_map_destroy(map);
806 map = ERR_PTR(-ENOMEM);
807
808 goto out;
809}
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
/**
 * tracing_map_init - Allocate and clear a map's tracing_map_elts
 * @map: The tracing_map to initialize
 *
 * Preallocates the full element pool and resets the map to a clean
 * state.  Must be called after all fields have been added — a map
 * needs at least two fields (n_fields < 2 is rejected).
 *
 * Return: 0 on success, -EINVAL if too few fields were added, or
 * -ENOMEM if the pool allocation failed.
 */
int tracing_map_init(struct tracing_map *map)
{
	int err;

	if (map->n_fields < 2)
		return -EINVAL;

	err = tracing_map_alloc_elts(map);
	if (err)
		return err;

	tracing_map_clear(map);

	return err;
}
845
846static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
847 const struct tracing_map_sort_entry **b)
848{
849 int ret = 0;
850
851 if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
852 ret = 1;
853
854 return ret;
855}
856
/*
 * Compare two sort entries on a sum field.  The field to compare and
 * the sort direction come from the map's current sort_key, installed
 * beforehand by set_sort_key().  The comparator stored in the field is
 * applied to the two atomic64 sums; the result is negated for
 * descending sorts.
 */
static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
			   const struct tracing_map_sort_entry **b)
{
	const struct tracing_map_elt *elt_a, *elt_b;
	struct tracing_map_sort_key *sort_key;
	struct tracing_map_field *field;
	tracing_map_cmp_fn_t cmp_fn;
	void *val_a, *val_b;
	int ret = 0;

	elt_a = (*a)->elt;
	elt_b = (*b)->elt;

	sort_key = &elt_a->map->sort_key;

	field = &elt_a->fields[sort_key->field_idx];
	cmp_fn = field->cmp_fn;

	val_a = &elt_a->fields[sort_key->field_idx].sum;
	val_b = &elt_b->fields[sort_key->field_idx].sum;

	ret = cmp_fn(val_a, val_b);
	if (sort_key->descending)
		ret = -ret;

	return ret;
}
884
/*
 * Compare two sort entries on a key field.  As with cmp_entries_sum(),
 * the field index and direction come from the map's current sort_key.
 * The field's comparator is applied to the key bytes at the field's
 * offset within each element's composite key; the result is negated
 * for descending sorts.
 */
static int cmp_entries_key(const struct tracing_map_sort_entry **a,
			   const struct tracing_map_sort_entry **b)
{
	const struct tracing_map_elt *elt_a, *elt_b;
	struct tracing_map_sort_key *sort_key;
	struct tracing_map_field *field;
	tracing_map_cmp_fn_t cmp_fn;
	void *val_a, *val_b;
	int ret = 0;

	elt_a = (*a)->elt;
	elt_b = (*b)->elt;

	sort_key = &elt_a->map->sort_key;

	field = &elt_a->fields[sort_key->field_idx];

	cmp_fn = field->cmp_fn;

	val_a = elt_a->key + field->offset;
	val_b = elt_b->key + field->offset;

	ret = cmp_fn(val_a, val_b);
	if (sort_key->descending)
		ret = -ret;

	return ret;
}
913
/*
 * Free a single sort entry.  If the entry owns a private copy of its
 * element (elt_copied set), the copy is freed too; otherwise the
 * element belongs to the map and is left alone.  Safe to call on NULL.
 */
static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
{
	if (!entry)
		return;

	if (entry->elt_copied)
		tracing_map_elt_free(entry->elt);

	kfree(entry);
}
924
925
926
927
928
929
930
931
/**
 * tracing_map_destroy_sort_entries - Destroy an array of sort entries
 * @entries: The entries to destroy
 * @n_entries: The number of entries in the array
 *
 * Destroy the elements returned by a tracing_map_sort_entries() call:
 * each individual entry, then the vmalloc'd array holding them.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
				      unsigned int n_entries)
{
	unsigned int i;

	for (i = 0; i < n_entries; i++)
		destroy_sort_entry(entries[i]);

	vfree(entries);
}
942
943static struct tracing_map_sort_entry *
944create_sort_entry(void *key, struct tracing_map_elt *elt)
945{
946 struct tracing_map_sort_entry *sort_entry;
947
948 sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
949 if (!sort_entry)
950 return NULL;
951
952 sort_entry->key = key;
953 sort_entry->elt = elt;
954
955 return sort_entry;
956}
957
958static void detect_dups(struct tracing_map_sort_entry **sort_entries,
959 int n_entries, unsigned int key_size)
960{
961 unsigned int dups = 0, total_dups = 0;
962 int i;
963 void *key;
964
965 if (n_entries < 2)
966 return;
967
968 sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
969 (int (*)(const void *, const void *))cmp_entries_dup, NULL);
970
971 key = sort_entries[0]->key;
972 for (i = 1; i < n_entries; i++) {
973 if (!memcmp(sort_entries[i]->key, key, key_size)) {
974 dups++; total_dups++;
975 continue;
976 }
977 key = sort_entries[i]->key;
978 dups = 0;
979 }
980
981 WARN_ONCE(total_dups > 0,
982 "Duplicates detected: %d\n", total_dups);
983}
984
985static bool is_key(struct tracing_map *map, unsigned int field_idx)
986{
987 unsigned int i;
988
989 for (i = 0; i < map->n_keys; i++)
990 if (map->key_idx[i] == field_idx)
991 return true;
992 return false;
993}
994
/*
 * Sort runs of primary-equal entries by the secondary sort key.
 * @entries must already be sorted on @primary_key; this walks the
 * array, detects each maximal run of entries whose primary fields
 * compare equal, and re-sorts just that run with the secondary
 * comparator.  map->sort_key is temporarily switched to the secondary
 * key during each sub-sort because the comparators read it implicitly.
 */
static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const struct tracing_map_sort_entry **,
			  const struct tracing_map_sort_entry **);
	int (*secondary_fn)(const struct tracing_map_sort_entry **,
			    const struct tracing_map_sort_entry **);
	unsigned i, start = 0, n_sub = 1;

	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		if (primary_fn(a, b) == 0) {
			n_sub++;
			/* keep extending the run unless it reaches the end */
			if (i < n_entries - 2)
				continue;
		}

		if (n_sub < 2) {
			/* run of one entry — nothing to sub-sort */
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
/**
 * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
 * @map: The tracing_map
 * @sort_keys: The sort keys to use for sorting (up to two)
 * @n_sort_keys: The number of sort keys in the sort_keys array
 * @sort_entries: outval: pointer to the allocated, sorted array of entries
 *
 * Snapshots every populated entry in the map into a freshly allocated
 * array, checks for duplicate keys, sorts by the first sort key, and —
 * if a second key is given — sub-sorts runs of primary-equal entries
 * by it.  On success the caller owns *sort_entries and must release it
 * with tracing_map_destroy_sort_entries().
 *
 * Return: the number of entries in *sort_entries (0 means none and
 * nothing was allocated), or -ENOMEM on allocation failure.
 */
int tracing_map_sort_entries(struct tracing_map *map,
			     struct tracing_map_sort_key *sort_keys,
			     unsigned int n_sort_keys,
			     struct tracing_map_sort_entry ***sort_entries)
{
	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
			      const struct tracing_map_sort_entry **);
	struct tracing_map_sort_entry *sort_entry, **entries;
	int i, n_entries, ret;

	entries = vmalloc(map->max_elts * sizeof(sort_entry));
	if (!entries)
		return -ENOMEM;

	/* collect every slot that holds a fully published element */
	for (i = 0, n_entries = 0; i < map->map_size; i++) {
		struct tracing_map_entry *entry;

		entry = TRACING_MAP_ENTRY(map->map, i);

		if (!entry->key || !entry->val)
			continue;

		entries[n_entries] = create_sort_entry(entry->val->key,
						       entry->val);
		if (!entries[n_entries++]) {
			ret = -ENOMEM;
			goto free;
		}
	}

	if (n_entries == 0) {
		ret = 0;
		goto free;
	}

	if (n_entries == 1) {
		/* nothing to sort */
		*sort_entries = entries;
		return 1;
	}

	detect_dups(entries, n_entries, map->key_size);

	if (is_key(map, sort_keys[0].field_idx))
		cmp_entries_fn = cmp_entries_key;
	else
		cmp_entries_fn = cmp_entries_sum;

	/* comparators read the sort key from map->sort_key */
	set_sort_key(map, &sort_keys[0]);

	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

	if (n_sort_keys > 1)
		sort_secondary(map,
			       (const struct tracing_map_sort_entry **)entries,
			       n_entries,
			       &sort_keys[0],
			       &sort_keys[1]);

	*sort_entries = entries;

	return n_entries;
 free:
	tracing_map_destroy_sort_entries(entries, n_entries);

	return ret;
}
1135