#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
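
/* Firmware command wrappers for creating, modifying and destroying QoS
 * scheduling elements.
 */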
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 *element_id)
{
	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
	void *schedc;
	int err;

	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(create_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
	if (err)
		return err;

	*element_id = MLX5_GET(create_scheduling_element_out, out,
			       scheduling_element_id);
	return 0;
}

int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 element_id,
				       u32 modify_bitmask)
{
	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
	void *schedc;

	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(modify_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
		 modify_bitmask);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}

int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};

	MLX5_SET(destroy_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);

	return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}

static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
				  u16 uid)
{
	return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
		entry->uid == uid);
}
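
/* Find an entry where the given rate can be registered.
 * For a dedicated request, return the first unused entry. Otherwise return
 * an existing non-dedicated entry that already holds the same raw rate
 * context and uid, or the first unused entry if none matches.
 * Return NULL when the table is full.
 */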
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
					   void *rl_in, u16 uid, bool dedicated)
{
	struct mlx5_rl_entry *ret_entry = NULL;
	bool empty_found = false;
	int i;

	lockdep_assert_held(&table->rl_lock);
	WARN_ON(!table->rl_entry);

	for (i = 0; i < table->max_size; i++) {
		if (dedicated) {
			if (!table->rl_entry[i].refcount)
				return &table->rl_entry[i];
			continue;
		}

		if (table->rl_entry[i].refcount) {
			if (table->rl_entry[i].dedicated)
				continue;
			if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
						  uid))
				return &table->rl_entry[i];
		} else if (!empty_found) {
			empty_found = true;
			ret_entry = &table->rl_entry[i];
		}
	}

	return ret_entry;
}

static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
				      struct mlx5_rl_entry *entry, bool set)
{
	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
	void *pp_context;

	pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
	MLX5_SET(set_pp_rate_limit_in, in, opcode,
		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
	MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
	if (set)
		memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
	return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}

bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	return (rate <= table->max_rate && rate >= table->min_rate);
}
EXPORT_SYMBOL(mlx5_rl_is_in_range);

bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1)
{
	return ((rl_0->rate == rl_1->rate) &&
		(rl_0->max_burst_sz == rl_1->max_burst_sz) &&
		(rl_0->typical_pkt_sz == rl_1->typical_pkt_sz));
}
EXPORT_SYMBOL(mlx5_rl_are_equal);

static int mlx5_rl_table_get(struct mlx5_rl_table *table)
{
	int i;

	lockdep_assert_held(&table->rl_lock);

	if (table->rl_entry) {
		table->refcount++;
		return 0;
	}

	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
				  GFP_KERNEL);
	if (!table->rl_entry)
		return -ENOMEM;
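
	/* Entry i is programmed at HW rate limit table index i + 1;
	 * index 0 is reserved for the unlimited rate.
	 */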
	for (i = 0; i < table->max_size; i++)
		table->rl_entry[i].index = i + 1;

	table->refcount++;
	return 0;
}

static void mlx5_rl_table_put(struct mlx5_rl_table *table)
{
	lockdep_assert_held(&table->rl_lock);
	if (--table->refcount)
		return;

	kfree(table->rl_entry);
	table->rl_entry = NULL;
}

static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
{
	int i;

	if (!table->rl_entry)
		return;
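
	/* Clear any rates that are still configured in the device */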
	for (i = 0; i < table->max_size; i++)
		if (table->rl_entry[i].refcount)
			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
						   false);
	kfree(table->rl_entry);
}

static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
{
	entry->refcount++;
}

static void
mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
{
	entry->refcount--;
	if (!entry->refcount)
		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
}

int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;
	u32 rate;
	int err;

	if (!table->max_size)
		return -EOPNOTSUPP;

	rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
			      rate, table->min_rate, table->max_rate);
		return -EINVAL;
	}

	mutex_lock(&table->rl_lock);
	err = mlx5_rl_table_get(table);
	if (err)
		goto out;

	entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
	if (!entry) {
		mlx5_core_err(dev, "Max number of %u rates reached\n",
			      table->max_size);
		err = -ENOSPC;
		goto rl_err;
	}
	if (!entry->refcount) {
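		/* New rate limit entry: program the rate in the device */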
		memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
		entry->uid = uid;
		err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
		if (err) {
			mlx5_core_err(
				dev,
				"Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				err, rate,
				MLX5_GET(set_pp_rate_limit_context, rl_in,
					 burst_upper_bound),
				MLX5_GET(set_pp_rate_limit_context, rl_in,
					 typical_packet_size));
			goto rl_err;
		}

		entry->dedicated = dedicated_entry;
	}
	mlx5_rl_entry_get(entry);
	*index = entry->index;
	mutex_unlock(&table->rl_lock);
	return 0;

rl_err:
	mlx5_rl_table_put(table);
out:
	mutex_unlock(&table->rl_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_rl_add_rate_raw);

void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;

	mutex_lock(&table->rl_lock);
	entry = &table->rl_entry[index - 1];
	mlx5_rl_entry_put(dev, entry);
	mlx5_rl_table_put(table);
	mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);

int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
		 rl->max_burst_sz);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
		 rl->typical_pkt_sz);

	return mlx5_rl_add_rate_raw(dev, rl_raw,
				    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
					MLX5_SHARED_RESOURCE_UID : 0,
				    false, index);
}
EXPORT_SYMBOL(mlx5_rl_add_rate);
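
/* Illustrative usage sketch (not taken from an in-tree caller): a consumer
 * of this API pairs mlx5_rl_add_rate() with mlx5_rl_remove_rate() on the
 * same rate parameters and uses the returned index to select the HW rate
 * limiter, e.g.:
 *
 *	struct mlx5_rate_limit rl = { .rate = rate };
 *	u16 index;
 *	int err = mlx5_rl_add_rate(dev, &index, &rl);
 *
 *	if (!err) {
 *		... program 'index' into the queue's rate limit field ...
 *		mlx5_rl_remove_rate(dev, &rl);
 *	}
 */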

void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry = NULL;
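
	/* Rate 0 is a reserved value meaning "unlimited"; nothing to remove */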
	if (rl->rate == 0)
		return;

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
		 rl->max_burst_sz);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
		 rl->typical_pkt_sz);

	mutex_lock(&table->rl_lock);
	entry = find_rl_entry(table, rl_raw,
			      MLX5_CAP_QOS(dev, packet_pacing_uid) ?
			      MLX5_SHARED_RESOURCE_UID : 0, false);
	if (!entry || !entry->refcount) {
		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
		goto out;
	}
	mlx5_rl_entry_put(dev, entry);
	mlx5_rl_table_put(table);
out:
	mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate);

int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
		table->max_size = 0;
		return 0;
	}

	mutex_init(&table->rl_lock);
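
	/* The first HW table entry is reserved for the unlimited rate, so the
	 * usable table size is one less than the reported capability.
	 */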
	table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
	table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
	table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);

	mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
		       table->max_size,
		       table->min_rate >> 10,
		       table->max_rate >> 10);

	return 0;
}

void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
		return;

	mlx5_rl_table_free(dev, table);
	mutex_destroy(&table->rl_lock);
}