1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/etherdevice.h>
35#include <linux/idr.h>
36#include "mlx5_core.h"
37#include "lib/mlx5.h"
38
39void mlx5_init_reserved_gids(struct mlx5_core_dev *dev)
40{
41 unsigned int tblsz = MLX5_CAP_ROCE(dev, roce_address_table_size);
42
43 ida_init(&dev->roce.reserved_gids.ida);
44 dev->roce.reserved_gids.start = tblsz;
45 dev->roce.reserved_gids.count = 0;
46}
47
48void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
49{
50 WARN_ON(!ida_is_empty(&dev->roce.reserved_gids.ida));
51 dev->roce.reserved_gids.start = 0;
52 dev->roce.reserved_gids.count = 0;
53 ida_destroy(&dev->roce.reserved_gids.ida);
54}
55
56int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
57{
58 if (dev->roce.reserved_gids.start < count) {
59 mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
60 count);
61 return -ENOMEM;
62 }
63 if (dev->roce.reserved_gids.count + count > MLX5_MAX_RESERVED_GIDS) {
64 mlx5_core_warn(dev, "Unable to reserve %d more GIDs\n", count);
65 return -ENOMEM;
66 }
67
68 dev->roce.reserved_gids.start -= count;
69 dev->roce.reserved_gids.count += count;
70 mlx5_core_dbg(dev, "Reserved %u GIDs starting at %u\n",
71 dev->roce.reserved_gids.count,
72 dev->roce.reserved_gids.start);
73 return 0;
74}
75
76void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
77{
78 WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
79 count, dev->roce.reserved_gids.count);
80
81 dev->roce.reserved_gids.start += count;
82 dev->roce.reserved_gids.count -= count;
83 mlx5_core_dbg(dev, "%u GIDs starting at %u left reserved\n",
84 dev->roce.reserved_gids.count,
85 dev->roce.reserved_gids.start);
86}
87
88int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
89{
90 int end = dev->roce.reserved_gids.start +
91 dev->roce.reserved_gids.count - 1;
92 int index = 0;
93
94 index = ida_alloc_range(&dev->roce.reserved_gids.ida,
95 dev->roce.reserved_gids.start, end,
96 GFP_KERNEL);
97 if (index < 0)
98 return index;
99
100 mlx5_core_dbg(dev, "Allocating reserved GID %u\n", index);
101 *gid_index = index;
102 return 0;
103}
104
105void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
106{
107 mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
108 ida_free(&dev->roce.reserved_gids.ida, gid_index);
109}
110
111unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
112{
113 return dev->roce.reserved_gids.count;
114}
115EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
116
/**
 * mlx5_core_roce_gid_set - program one entry of the device RoCE address table
 * @dev:          mlx5 core device; must be in Ethernet port mode
 * @index:        GID table index to write
 * @roce_version: RoCE version field for the entry (device-defined encoding)
 * @roce_l3_type: L3 address type field (device-defined encoding)
 * @gid:          GID value to program, or NULL to clear the entry
 * @mac:          source MAC for the entry; only read when @gid is non-NULL
 * @vlan:         when true (and @gid is set), mark the entry VLAN-tagged
 * @vlan_id:      VLAN id to program when @vlan is true
 * @port_num:     vhca port number; only written when the device reports
 *                multiple vhca ports
 *
 * Builds a SET_ROCE_ADDRESS command on the stack and executes it
 * synchronously. Returns 0 on success, -EINVAL if the port is not
 * Ethernet, or a negative errno from command execution.
 */
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
/* Shorthand for setting fields of the roce_addr_layout sub-structure. */
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
					  source_l3_address);
	void *addr_mac = MLX5_ADDR_OF(roce_addr_layout, in_addr,
				      source_mac_47_32);
	int gidsz = MLX5_FLD_SZ_BYTES(roce_addr_layout, source_l3_address);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EINVAL;

	/* NULL gid clears the entry: address/MAC/VLAN fields stay zeroed. */
	if (gid) {
		if (vlan) {
			MLX5_SET_RA(in_addr, vlan_valid, 1);
			MLX5_SET_RA(in_addr, vlan_id, vlan_id);
		}

		ether_addr_copy(addr_mac, mac);
		memcpy(addr_l3_addr, gid, gidsz);
	}
	MLX5_SET_RA(in_addr, roce_version, roce_version);
	MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);

	/* Multi-port HCA: direct the command at the specific vhca port. */
	if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
		MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec_in(dev, set_roce_address, in);
}
EXPORT_SYMBOL(mlx5_core_roce_gid_set);
153