#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else

#define msi_x (0)

#endif

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 9 <="
					 " log_num_mgm_entry_size <= 12."
					 " Not in use with device managed"
					 " flow steering");

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		 "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
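		/* default gid and pkey table lengths to the physical
		 * (non-SRIOV) values reported by the firmware */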
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
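	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */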
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

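	/* The first 128 UARs are used for EQ doorbells */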
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		} else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
					  "set to use B0 steering. Falling back to A0 steering mode.\n");
		}
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));

	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
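				/* if both IB and ETH are supported, use the
				 * type requested through the port_type_array
				 * module parameter; if none was given, fall
				 * back to the firmware's suggested type */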
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}

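		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */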
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

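		/*
		 * If the "default_sense" bit is set, move the port to "AUTO"
		 * mode and issue the SENSE_PORT FW command to try and set the
		 * correct port type from the start.
		 */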
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
	return 0;
}

static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

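	/* fail if the hca has an unknown global capability bit set */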
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->caps.num_qps = func_cap.qp_quota;
	dev->caps.num_srqs = func_cap.srq_quota;
	dev->caps.num_cqs = func_cap.cq_quota;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_mpts = func_cap.mpt_quota;
	dev->caps.num_mtts = func_cap.mtt_quota;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
				 " port %d, aborting (%d).\n", i, err);
			goto err_mem;
		}
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		goto err_mem;
	}

	return 0;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;

	return err;
}

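/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */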
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);

	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

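	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */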
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = sscanf(buf, "%d", &mtu);
	if (err > 0)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err <= 0 || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA MTU\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				 "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

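	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size.)
	 */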
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

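	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */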
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);

	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. retrying..."
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

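	/* check the driver version - the slave I/F revision
	 * must match the master's */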
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		priv->fs_hash_mode = MLX4_FS_L2_HASH;

		switch (priv->fs_hash_mode) {
		case MLX4_FS_L2_HASH:
			init_hca.fs_hash_enable_bits = 0;
			break;

		case MLX4_FS_L2_L3_L4_HASH:
			init_hca.fs_hash_enable_bits =
				MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
			break;
		}

		profile = default_profile;
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_bf_area(dev);

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
	return;
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, netif_get_num_default_rss_queues() + 1,
			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
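		/* In multifunction mode each function gets 2 MSI-X vectors:
		 * one for data path completions and the other for async
		 * events or command completions */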
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store = set_port_type;
	}
	info->port_attr.show = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store = set_port_ib_mtu;
	}
	info->port_mtu_attr.show = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}

	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting. "
			"(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

1966 pci_set_master(pdev);
1967
1968 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1969 if (err) {
1970 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
1971 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1972 if (err) {
1973 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1974 goto err_release_regions;
1975 }
1976 }
1977 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1978 if (err) {
1979 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
1980 "consistent PCI DMA mask.\n");
1981 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1982 if (err) {
1983 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1984 "aborting.\n");
1985 goto err_release_regions;
1986 }
1987 }
1988
1989
1990 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
1991
1992 priv = kzalloc(sizeof *priv, GFP_KERNEL);
1993 if (!priv) {
1994 dev_err(&pdev->dev, "Device struct alloc failed, "
1995 "aborting.\n");
1996 err = -ENOMEM;
1997 goto err_release_regions;
1998 }
1999
2000 dev = &priv->dev;
2001 dev->pdev = pdev;
2002 INIT_LIST_HEAD(&priv->ctx_list);
2003 spin_lock_init(&priv->ctx_lock);
2004
2005 mutex_init(&priv->port_mutex);
2006
2007 INIT_LIST_HEAD(&priv->pgdir_list);
2008 mutex_init(&priv->pgdir_mutex);
2009
2010 INIT_LIST_HEAD(&priv->bf_list);
2011 mutex_init(&priv->bf_mutex);
2012
2013 dev->rev_id = pdev->revision;
2014
2015 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2016
2017
2018 if (num_vfs && extended_func_num(pdev) > probe_vf) {
2019 mlx4_warn(dev, "Skipping virtual function:%d\n",
2020 extended_func_num(pdev));
2021 err = -ENODEV;
2022 goto err_free_dev;
2023 }
2024 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2025 dev->flags |= MLX4_FLAG_SLAVE;
2026 } else {
2027
2028
2029
2030 err = mlx4_get_ownership(dev);
2031 if (err) {
2032 if (err < 0)
2033 goto err_free_dev;
2034 else {
2035 mlx4_warn(dev, "Multiple PFs not yet supported."
2036 " Skipping PF.\n");
2037 err = -EINVAL;
2038 goto err_free_dev;
2039 }
2040 }
2041
2042 if (num_vfs) {
2043 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
2044 err = pci_enable_sriov(pdev, num_vfs);
2045 if (err) {
2046 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2047 err);
2048 err = 0;
2049 } else {
2050 mlx4_warn(dev, "Running in master mode\n");
2051 dev->flags |= MLX4_FLAG_SRIOV |
2052 MLX4_FLAG_MASTER;
2053 dev->num_vfs = num_vfs;
2054 }
2055 }
2056
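		/* Reset the HCA before touching it any further, so that it
		 * comes up in a known state.
		 */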
2062 err = mlx4_reset(dev);
2063 if (err) {
2064 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
2065 goto err_rel_own;
2066 }
2067 }
2068
2069slave_start:
2070 err = mlx4_cmd_init(dev);
2071 if (err) {
2072 mlx4_err(dev, "Failed to init command interface, aborting.\n");
2073 goto err_sriov;
2074 }
2075
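	/* Multi-function setup: the master only records the maximum number of
	 * slaves here, while a slave must bring up its communication channel
	 * with the master before issuing any further commands.
	 */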
2079 if (mlx4_is_mfunc(dev)) {
2080 if (mlx4_is_master(dev))
2081 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
2082 else {
2083 dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
2089 }
2090 }
2091
2092 err = mlx4_init_hca(dev);
2093 if (err) {
2094 if (err == -EACCES) {
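			/* -EACCES means this function may not initialize the
			 * HCA itself: fall back to slave mode and redo the
			 * command-interface initialization.
			 */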
2097 mlx4_cmd_cleanup(dev);
2098 dev->flags |= MLX4_FLAG_SLAVE;
2099 dev->flags &= ~MLX4_FLAG_MASTER;
2100 goto slave_start;
2101 } else
2102 goto err_mfunc;
2103 }
2104
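	/* On the master, the multi-function interface is initialized only
	 * after the HCA itself is up (unlike the slave path above).
	 */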
2107 if (mlx4_is_master(dev)) {
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc"
				 " interface, aborting.\n");
2111 goto err_close;
2112 }
2113 }
2114
2115 err = mlx4_alloc_eq_table(dev);
2116 if (err)
2117 goto err_master_mfunc;
2118
2119 priv->msix_ctl.pool_bm = 0;
2120 mutex_init(&priv->msix_ctl.pool_lock);
2121
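	/* Try to enable MSI-X; multi-function mode cannot run on INTx. */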
2122 mlx4_enable_msi_x(dev);
	if (mlx4_is_mfunc(dev) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, "
			 "aborting.\n");
		goto err_free_eq;
	}
2129
2130 if (!mlx4_is_slave(dev)) {
2131 err = mlx4_init_steering(dev);
2132 if (err)
2133 goto err_free_eq;
2134 }
2135
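	/* If HCA setup fails with -EBUSY while MSI-X is enabled (single-
	 * function mode only), fall back to INTx and retry once.
	 */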
2136 err = mlx4_setup_hca(dev);
2137 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
2138 !mlx4_is_mfunc(dev)) {
2139 dev->flags &= ~MLX4_FLAG_MSI_X;
2140 dev->caps.num_comp_vectors = 1;
2141 dev->caps.comp_pool = 0;
2142 pci_disable_msix(pdev);
2143 err = mlx4_setup_hca(dev);
2144 }
2145
2146 if (err)
2147 goto err_steer;
2148
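	/* Set up per-port information for every physical port. */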
2149 for (port = 1; port <= dev->caps.num_ports; port++) {
2150 err = mlx4_init_port_info(dev, port);
2151 if (err)
2152 goto err_port;
2153 }
2154
2155 err = mlx4_register_device(dev);
2156 if (err)
2157 goto err_port;
2158
2159 mlx4_sense_init(dev);
2160 mlx4_start_sense(dev);
2161
2162 priv->pci_dev_data = pci_dev_data;
2163 pci_set_drvdata(pdev, dev);
2164
2165 return 0;
2166
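/* Error unwind: undo the steps above in reverse order. */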
2167err_port:
2168 for (--port; port >= 1; --port)
2169 mlx4_cleanup_port_info(&priv->port[port]);
2170
2171 mlx4_cleanup_counters_table(dev);
2172 mlx4_cleanup_mcg_table(dev);
2173 mlx4_cleanup_qp_table(dev);
2174 mlx4_cleanup_srq_table(dev);
2175 mlx4_cleanup_cq_table(dev);
2176 mlx4_cmd_use_polling(dev);
2177 mlx4_cleanup_eq_table(dev);
2178 mlx4_cleanup_mr_table(dev);
2179 mlx4_cleanup_xrcd_table(dev);
2180 mlx4_cleanup_pd_table(dev);
2181 mlx4_cleanup_uar_table(dev);
2182
2183err_steer:
2184 if (!mlx4_is_slave(dev))
2185 mlx4_clear_steering(dev);
2186
2187err_free_eq:
2188 mlx4_free_eq_table(dev);
2189
2190err_master_mfunc:
2191 if (mlx4_is_master(dev))
2192 mlx4_multi_func_cleanup(dev);
2193
2194err_close:
2195 if (dev->flags & MLX4_FLAG_MSI_X)
2196 pci_disable_msix(pdev);
2197
2198 mlx4_close_hca(dev);
2199
2200err_mfunc:
2201 if (mlx4_is_slave(dev))
2202 mlx4_multi_func_cleanup(dev);
2203
2204err_cmd:
2205 mlx4_cmd_cleanup(dev);
2206
2207err_sriov:
2208 if (dev->flags & MLX4_FLAG_SRIOV)
2209 pci_disable_sriov(pdev);
2210
2211err_rel_own:
2212 if (!mlx4_is_slave(dev))
2213 mlx4_free_ownership(dev);
2214
2215err_free_dev:
2216 kfree(priv);
2217
2218err_release_regions:
2219 pci_release_regions(pdev);
2220
2221err_disable_pdev:
2222 pci_disable_device(pdev);
2223 pci_set_drvdata(pdev, NULL);
2224 return err;
2225}
2226
2227static int __devinit mlx4_init_one(struct pci_dev *pdev,
2228 const struct pci_device_id *id)
2229{
2230 printk_once(KERN_INFO "%s", mlx4_version);
2231
2232 return __mlx4_init_one(pdev, id->driver_data);
2233}
2234
2235static void mlx4_remove_one(struct pci_dev *pdev)
2236{
2237 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2238 struct mlx4_priv *priv = mlx4_priv(dev);
2239 int p;
2240
2241 if (dev) {
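		/* Unloading the PF driver while VFs are still assigned is not
		 * supported; warn loudly if it happens.
		 */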
2244 if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
2247 }
2248 mlx4_stop_sense(dev);
2249 mlx4_unregister_device(dev);
2250
2251 for (p = 1; p <= dev->caps.num_ports; p++) {
2252 mlx4_cleanup_port_info(&priv->port[p]);
2253 mlx4_CLOSE_PORT(dev, p);
2254 }
2255
2256 if (mlx4_is_master(dev))
2257 mlx4_free_resource_tracker(dev,
2258 RES_TR_FREE_SLAVES_ONLY);
2259
2260 mlx4_cleanup_counters_table(dev);
2261 mlx4_cleanup_mcg_table(dev);
2262 mlx4_cleanup_qp_table(dev);
2263 mlx4_cleanup_srq_table(dev);
2264 mlx4_cleanup_cq_table(dev);
2265 mlx4_cmd_use_polling(dev);
2266 mlx4_cleanup_eq_table(dev);
2267 mlx4_cleanup_mr_table(dev);
2268 mlx4_cleanup_xrcd_table(dev);
2269 mlx4_cleanup_pd_table(dev);
2270
2271 if (mlx4_is_master(dev))
2272 mlx4_free_resource_tracker(dev,
2273 RES_TR_FREE_STRUCTS_ONLY);
2274
2275 iounmap(priv->kar);
2276 mlx4_uar_free(dev, &priv->driver_uar);
2277 mlx4_cleanup_uar_table(dev);
2278 if (!mlx4_is_slave(dev))
2279 mlx4_clear_steering(dev);
2280 mlx4_free_eq_table(dev);
2281 if (mlx4_is_master(dev))
2282 mlx4_multi_func_cleanup(dev);
2283 mlx4_close_hca(dev);
2284 if (mlx4_is_slave(dev))
2285 mlx4_multi_func_cleanup(dev);
2286 mlx4_cmd_cleanup(dev);
2287
2288 if (dev->flags & MLX4_FLAG_MSI_X)
2289 pci_disable_msix(pdev);
2290 if (dev->flags & MLX4_FLAG_SRIOV) {
2291 mlx4_warn(dev, "Disabling SR-IOV\n");
2292 pci_disable_sriov(pdev);
2293 }
2294
2295 if (!mlx4_is_slave(dev))
2296 mlx4_free_ownership(dev);
2297
2298 kfree(dev->caps.qp0_tunnel);
2299 kfree(dev->caps.qp0_proxy);
2300 kfree(dev->caps.qp1_tunnel);
2301 kfree(dev->caps.qp1_proxy);
2302
2303 kfree(priv);
2304 pci_release_regions(pdev);
2305 pci_disable_device(pdev);
2306 pci_set_drvdata(pdev, NULL);
2307 }
2308}
2309
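/* Tear the device down and probe it again with the same device-specific
 * flags; used when the driver needs to reinitialize the HCA in place.
 */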
2310int mlx4_restart_one(struct pci_dev *pdev)
2311{
2312 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2313 struct mlx4_priv *priv = mlx4_priv(dev);
2314 int pci_dev_data;
2315
2316 pci_dev_data = priv->pci_dev_data;
2317 mlx4_remove_one(pdev);
2318 return __mlx4_init_one(pdev, pci_dev_data);
2319}
2320
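/* ConnectX-family devices handled by this driver.  Entries flagged
 * MLX4_PCI_DEV_FORCE_SENSE_PORT get automatic port-type sensing; entries
 * flagged MLX4_PCI_DEV_IS_VF are virtual functions.
 */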
2321static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
2352 { PCI_VDEVICE(MELLANOX, 0x1005), 0 },
2353 { PCI_VDEVICE(MELLANOX, 0x1006), 0 },
2354 { PCI_VDEVICE(MELLANOX, 0x1007), 0 },
2355 { PCI_VDEVICE(MELLANOX, 0x1008), 0 },
2356 { PCI_VDEVICE(MELLANOX, 0x1009), 0 },
2357 { PCI_VDEVICE(MELLANOX, 0x100a), 0 },
2358 { PCI_VDEVICE(MELLANOX, 0x100b), 0 },
2359 { PCI_VDEVICE(MELLANOX, 0x100c), 0 },
2360 { PCI_VDEVICE(MELLANOX, 0x100d), 0 },
2361 { PCI_VDEVICE(MELLANOX, 0x100e), 0 },
2362 { PCI_VDEVICE(MELLANOX, 0x100f), 0 },
2363 { PCI_VDEVICE(MELLANOX, 0x1010), 0 },
2364 { 0, }
2365};
2366
2367MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
2368
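/* PCI error recovery: tear the device down completely when a channel error
 * is detected, and probe it again from scratch after a slot reset.
 */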
2369static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2370 pci_channel_state_t state)
2371{
2372 mlx4_remove_one(pdev);
2373
2374 return state == pci_channel_io_perm_failure ?
2375 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
2376}
2377
2378static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2379{
2380 int ret = __mlx4_init_one(pdev, 0);
2381
2382 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2383}
2384
2385static const struct pci_error_handlers mlx4_err_handler = {
2386 .error_detected = mlx4_pci_err_detected,
2387 .slot_reset = mlx4_pci_slot_reset,
2388};
2389
2390static struct pci_driver mlx4_driver = {
2391 .name = DRV_NAME,
2392 .id_table = mlx4_pci_table,
2393 .probe = mlx4_init_one,
2394 .remove = __devexit_p(mlx4_remove_one),
2395 .err_handler = &mlx4_err_handler,
2396};
2397
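/* Sanity-check module parameters before registering the PCI driver. */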
2398static int __init mlx4_verify_params(void)
2399{
2400 if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
2402 return -1;
2403 }
2404
2405 if (log_num_vlan != 0)
2406 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2407 MLX4_LOG_NUM_VLANS);
2408
2409 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
2410 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
2411 return -1;
2412 }
2413
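	/* The ETH/IB port-type combination is not supported; switch port 1
	 * back to the default so both ports run IB.
	 */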
2415 if (port_type_array[0] == false && port_type_array[1] == true) {
2416 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2417 port_type_array[0] = true;
2418 }
2419
2420 return 0;
2421}
2422
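/* Module entry point: validate parameters, set up catastrophic-error
 * handling and the global workqueue, then register the PCI driver.
 */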
2423static int __init mlx4_init(void)
2424{
2425 int ret;
2426
2427 if (mlx4_verify_params())
2428 return -EINVAL;
2429
2430 mlx4_catas_init();
2431
2432 mlx4_wq = create_singlethread_workqueue("mlx4");
2433 if (!mlx4_wq)
2434 return -ENOMEM;
2435
2436 ret = pci_register_driver(&mlx4_driver);
2437 return ret < 0 ? ret : 0;
2438}
2439
2440static void __exit mlx4_cleanup(void)
2441{
2442 pci_unregister_driver(&mlx4_driver);
2443 destroy_workqueue(mlx4_wq);
2444}
2445
2446module_init(mlx4_init);
2447module_exit(mlx4_cleanup);
2448