// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

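/*
 * Account a work completion on this connection.  If it runs on a
 * different CPU than the one the connection is bound to, bump the
 * "to" counter on the current CPU and the "from" counter on the
 * connection's CPU; "from" is atomic since another CPU updates it.
 */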
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = this_cpu_ptr(stats->pcpu_stats);
	if (unlikely(con->cpu != cpu)) {
		s->cpu_migr.to++;

		/* Careful here, override s pointer */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
}

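/* Count an I/O that had to fail over to another path. */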
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.failover_cnt++;
}

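/*
 * Dump the CPU migration counters as a small table: a header row with
 * one column per possible CPU, then a "from:" row and a "to  :" row.
 */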
int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
					char *buf, size_t len)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = scnprintf(buf, len, "    ");
	for_each_possible_cpu(cpu)
		used += scnprintf(buf + used, len - used, " CPU%u", cpu);

	used += scnprintf(buf + used, len - used, "\nfrom:");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += scnprintf(buf + used, len - used, " %d",
				  atomic_read(&s->cpu_migr.from));
	}

	used += scnprintf(buf + used, len - used, "\nto  :");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += scnprintf(buf + used, len - used, " %d",
				  s->cpu_migr.to);
	}
	used += scnprintf(buf + used, len - used, "\n");

	return used;
}

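/* Print the successful and failed reconnect counts. */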
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
				     size_t len)
{
	return scnprintf(buf, len, "%d %d\n",
			 stats->reconnects.successful_cnt,
			 stats->reconnects.fail_cnt);
}

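/*
 * Sum the per-CPU RDMA counters and print: read count, read bytes,
 * write count, write bytes, current inflight count, failover count.
 */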
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
				   char *page, size_t len)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
			 sum.dir[READ].cnt, sum.dir[READ].size_total,
			 sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			 atomic_read(&stats->inflight), sum.failover_cnt);
}

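/* Help text shown when the reset-all attribute is read. */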
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
				char *page, size_t len)
{
	return scnprintf(page, len, "echo 1 to reset all statistics\n");
}

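/* Zero the per-CPU RDMA counters.  Only enable == true is accepted. */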
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}

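/* Zero the per-CPU migration counters.  Only enable == true is accepted. */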
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

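/* Zero the reconnect counters.  Only enable == true is accepted. */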
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

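/* Reset every statistic, including the inflight counter. */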
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

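/* Account one RDMA transfer of @size bytes in direction @d. */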
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.dir[d].cnt++;
	s->rdma.dir[d].size_total += size;
}

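/*
 * Account a request that is about to be issued: its total length is
 * the user message length plus the data length.  Under the
 * min-inflight multipath policy also bump the inflight counter used
 * for path selection.
 */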
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

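/* Allocate the per-CPU statistics.  Returns 0 or -ENOMEM. */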
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after session
	 * is established for the first time. On that point
	 * reconnect counter is meaningless.
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}