// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*									  */
/*  IBM System i/p Virtual NIC Device Driver				  */
/*  Copyright (C) IBM Corp.						  */
/*  Author: Santiago Leon						  */
/*									  */
/**************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

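/* Translate a named field of struct ibmvnic_statistics into its byte offset
 * within the adapter, and read a 64-bit statistic at that offset.
 */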
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

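/* Register a sub-CRQ page with the hypervisor via the H_REG_SUB_CRQ hcall.
 * On success the hypervisor returns the number assigned to the new queue
 * and the interrupt source to be used for it.
 */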
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

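/* Unmap a long term buffer from firmware and free its DMA memory.  The
 * unmap request is skipped for failover and mobility resets, where the
 * firmware-side mapping is no longer valid anyway.
 */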
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

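/* Zero an existing long term buffer and register it with firmware again.
 * If the re-map fails, fall back to freeing the buffer and allocating a
 * fresh one of the same size.
 */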
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

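/* Refill an RX pool up to its capacity: take a free slot from free_map,
 * attach a fresh skb to it, and hand the corresponding long-term-buffer
 * region to the hypervisor as an rx_add sub-CRQ descriptor.
 */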
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sub-CRQ descriptor is in
		 * big-endian format; on little-endian hosts the buffer
		 * length must be shifted so that it lands in the bytes
		 * the firmware expects after the byte swap.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off
		 * if the queue is closed or a failover is pending.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

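/* After a reset, bring every RX pool back to a clean state: reallocate the
 * long term buffer if the negotiated buffer size changed, otherwise just
 * re-register it, then reinitialize the free map and bookkeeping fields.
 */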
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

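/* Log in to the vNIC server.  A PARTIALSUCCESS response means the server
 * could not honor the requested capabilities; in that case the capabilities
 * are renegotiated and the sub-CRQs rebuilt before the login is retried,
 * up to IBMVNIC_MAX_QUEUES attempts.
 */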
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

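/* Ask the vNIC server to change the logical link state.  A return code of
 * 1 from the server indicates partial success; the request is re-sent after
 * a one second delay until it either succeeds or fails outright.
 */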
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

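/* Transition the adapter to the OPEN state: fill the RX pools, enable NAPI
 * and the sub-CRQ interrupts, raise the logical link, and start the TX
 * queues.  Called both from ndo_open and from the reset path.
 */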
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that for these devices, all SKBs
	 * shorter than the device's minimum MTU be padded up to
	 * that size before transmission.
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

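/* Main transmit path.  The skb is copied into a slot of the queue's long
 * term buffer (pre-mapped for DMA), a TX descriptor is built around that
 * slot, and the descriptor, plus optional header descriptors for LSO and
 * checksum offload, is handed to the hypervisor over the TX sub-CRQ.
 */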
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is
			 * closed or pending failover.
			 * Firmware guarantees that a signal will be sent
			 * to the driver, triggering a reset or some other
			 * action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		goto err;
	}

	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		rc = -EIO;
		goto err;
	}

	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (!rc)
				rc = vio_enable_interrupts(adapter->vdev);
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);
			return rc;
		}

		rc = ibmvnic_reset_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			return rc;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   old_num_rx_slots ||
			   adapter->req_tx_entries_per_subcrq !=
			   old_num_tx_slots) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				return rc;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

	return 0;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_init(adapter);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	return 0;
}

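/* Pull the next queued reset work item (rwi) off the adapter's list,
 * removing it under the rwi_lock; returns NULL when the list is empty.
 */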
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool we_lock_rtnl = false;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	/* netif_set_real_num_xx_queues needs to take rtnl lock here
	 * unless wait_for_reset is set, in which case the rtnl lock
	 * has already been taken before initializing the reset
	 */
	if (!adapter->wait_for_reset) {
		rtnl_lock();
		we_lock_rtnl = true;
	}
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (adapter->force_reset_recovery) {
			adapter->force_reset_recovery = false;
			rc = do_hard_reset(adapter, rwi, reset_state);
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED &&
		    !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
	}

	adapter->resetting = false;
	if (we_lock_rtnl)
		rtnl_unlock();
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}

	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	adapter->resetting = true;
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

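/* NAPI poll handler.  Drains up to @budget completions from the RX sub-CRQ,
 * copies each frame out of the long term buffer into its skb, and passes it
 * up through napi_gro_receive(); the pool is replenished afterwards.
 */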
2106static int ibmvnic_poll(struct napi_struct *napi, int budget)
2107{
2108 struct net_device *netdev = napi->dev;
2109 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2110 int scrq_num = (int)(napi - adapter->napi);
2111 int frames_processed = 0;
2112
2113restart_poll:
2114 while (frames_processed < budget) {
2115 struct sk_buff *skb;
2116 struct ibmvnic_rx_buff *rx_buff;
2117 union sub_crq *next;
2118 u32 length;
2119 u16 offset;
2120 u8 flags = 0;
2121
2122 if (unlikely(adapter->resetting &&
2123 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2124 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2125 napi_complete_done(napi, frames_processed);
2126 return frames_processed;
2127 }
2128
2129 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2130 break;
2131 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2132 rx_buff =
2133 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2134 rx_comp.correlator);
2135
2136 if (next->rx_comp.rc) {
2137 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2138 be16_to_cpu(next->rx_comp.rc));
2139
2140 next->rx_comp.first = 0;
2141 dev_kfree_skb_any(rx_buff->skb);
2142 remove_buff_from_pool(adapter, rx_buff);
2143 continue;
2144 } else if (!rx_buff->skb) {
2145
2146 next->rx_comp.first = 0;
2147 remove_buff_from_pool(adapter, rx_buff);
2148 continue;
2149 }
2150
2151 length = be32_to_cpu(next->rx_comp.len);
2152 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2153 flags = next->rx_comp.flags;
2154 skb = rx_buff->skb;
2155 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2156 length);
2157
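		/* The backing device stripped the VLAN header; put the
		 * tag back so the stack sees it.
		 */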
2161 if (adapter->rx_vlan_header_insertion &&
2162 (flags & IBMVNIC_VLAN_STRIPPED))
2163 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2164 ntohs(next->rx_comp.vlan_tci));
2165
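		/* Done with the completion entry; return the buffer */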
2167 next->rx_comp.first = 0;
2168 remove_buff_from_pool(adapter, rx_buff);
2169
2170 skb_put(skb, length);
2171 skb->protocol = eth_type_trans(skb, netdev);
2172 skb_record_rx_queue(skb, scrq_num);
2173
2174 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2175 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2176 skb->ip_summed = CHECKSUM_UNNECESSARY;
2177 }
2178
2179 length = skb->len;
2180 napi_gro_receive(napi, skb);
2181 netdev->stats.rx_packets++;
2182 netdev->stats.rx_bytes += length;
2183 adapter->rx_stats_buffers[scrq_num].packets++;
2184 adapter->rx_stats_buffers[scrq_num].bytes += length;
2185 frames_processed++;
2186 }
2187
2188 if (adapter->state != VNIC_CLOSING)
2189 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2190
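	/* Budget not exhausted: complete NAPI and re-arm the irq, but
	 * re-poll if more work slipped in while the irq was off.
	 */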
2191 if (frames_processed < budget) {
2192 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2193 napi_complete_done(napi, frames_processed);
2194 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2195 napi_reschedule(napi)) {
2196 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2197 goto restart_poll;
2198 }
2199 }
2200 return frames_processed;
2201}
2202
2203static int wait_for_reset(struct ibmvnic_adapter *adapter)
2204{
2205 int rc, ret;
2206
2207 adapter->fallback.mtu = adapter->req_mtu;
2208 adapter->fallback.rx_queues = adapter->req_rx_queues;
2209 adapter->fallback.tx_queues = adapter->req_tx_queues;
2210 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2211 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2212
2213 init_completion(&adapter->reset_done);
2214 adapter->wait_for_reset = true;
2215 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2216 if (rc)
2217 return rc;
2218 wait_for_completion(&adapter->reset_done);
2219
2220 ret = 0;
2221 if (adapter->reset_done_rc) {
2222 ret = -EIO;
2223 adapter->desired.mtu = adapter->fallback.mtu;
2224 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2225 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2226 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2227 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2228
2229 init_completion(&adapter->reset_done);
2230 adapter->wait_for_reset = true;
2231 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2232 if (rc)
2233 return ret;
2234 wait_for_completion(&adapter->reset_done);
2235 }
2236 adapter->wait_for_reset = false;
2237
2238 return ret;
2239}
2240
2241static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2242{
2243 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2244
2245 adapter->desired.mtu = new_mtu + ETH_HLEN;
2246
2247 return wait_for_reset(adapter);
2248}
2249
2250static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2251 struct net_device *dev,
2252 netdev_features_t features)
2253{
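	/* Some backing hardware adapters cannot handle GSO packets with
	 * an MSS below 224 bytes or with only one segment, so fall back
	 * to software segmentation for those.
	 */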
2258 if (skb_is_gso(skb)) {
2259 if (skb_shinfo(skb)->gso_size < 224 ||
2260 skb_shinfo(skb)->gso_segs == 1)
2261 features &= ~NETIF_F_GSO_MASK;
2262 }
2263
2264 return features;
2265}
2266
2267static const struct net_device_ops ibmvnic_netdev_ops = {
2268 .ndo_open = ibmvnic_open,
2269 .ndo_stop = ibmvnic_close,
2270 .ndo_start_xmit = ibmvnic_xmit,
2271 .ndo_set_rx_mode = ibmvnic_set_multi,
2272 .ndo_set_mac_address = ibmvnic_set_mac,
2273 .ndo_validate_addr = eth_validate_addr,
2274 .ndo_tx_timeout = ibmvnic_tx_timeout,
2275 .ndo_change_mtu = ibmvnic_change_mtu,
2276 .ndo_features_check = ibmvnic_features_check,
2277};
2278
2281static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2282 struct ethtool_link_ksettings *cmd)
2283{
2284 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2285 int rc;
2286
2287 rc = send_query_phys_parms(adapter);
2288 if (rc) {
2289 adapter->speed = SPEED_UNKNOWN;
2290 adapter->duplex = DUPLEX_UNKNOWN;
2291 }
2292 cmd->base.speed = adapter->speed;
2293 cmd->base.duplex = adapter->duplex;
2294 cmd->base.port = PORT_FIBRE;
2295 cmd->base.phy_address = 0;
2296 cmd->base.autoneg = AUTONEG_ENABLE;
2297
2298 return 0;
2299}
2300
2301static void ibmvnic_get_drvinfo(struct net_device *netdev,
2302 struct ethtool_drvinfo *info)
2303{
2304 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2305
2306 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2307 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2308 strlcpy(info->fw_version, adapter->fw_version,
2309 sizeof(info->fw_version));
2310}
2311
2312static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2313{
2314 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2315
2316 return adapter->msg_enable;
2317}
2318
2319static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2320{
2321 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2322
2323 adapter->msg_enable = data;
2324}
2325
2326static u32 ibmvnic_get_link(struct net_device *netdev)
2327{
2328 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2329
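	/* A logical link up is requested at init time and subsequent
	 * changes arrive as link state indications, so report the cached
	 * state rather than querying the server here.
	 */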
2333 return adapter->logical_link_state;
2334}
2335
2336static void ibmvnic_get_ringparam(struct net_device *netdev,
2337 struct ethtool_ringparam *ring)
2338{
2339 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2340
2341 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2342 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2343 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2344 } else {
2345 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2346 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2347 }
2348 ring->rx_mini_max_pending = 0;
2349 ring->rx_jumbo_max_pending = 0;
2350 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2351 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2352 ring->rx_mini_pending = 0;
2353 ring->rx_jumbo_pending = 0;
2354}
2355
2356static int ibmvnic_set_ringparam(struct net_device *netdev,
2357 struct ethtool_ringparam *ring)
2358{
2359 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2360 int ret;
2361
2362 ret = 0;
2363 adapter->desired.rx_entries = ring->rx_pending;
2364 adapter->desired.tx_entries = ring->tx_pending;
2365
2366 ret = wait_for_reset(adapter);
2367
2368 if (!ret &&
2369 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2370 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2371 netdev_info(netdev,
2372 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2373 ring->rx_pending, ring->tx_pending,
2374 adapter->req_rx_add_entries_per_subcrq,
2375 adapter->req_tx_entries_per_subcrq);
2376 return ret;
2377}
2378
2379static void ibmvnic_get_channels(struct net_device *netdev,
2380 struct ethtool_channels *channels)
2381{
2382 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2383
2384 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2385 channels->max_rx = adapter->max_rx_queues;
2386 channels->max_tx = adapter->max_tx_queues;
2387 } else {
2388 channels->max_rx = IBMVNIC_MAX_QUEUES;
2389 channels->max_tx = IBMVNIC_MAX_QUEUES;
2390 }
2391
2392 channels->max_other = 0;
2393 channels->max_combined = 0;
2394 channels->rx_count = adapter->req_rx_queues;
2395 channels->tx_count = adapter->req_tx_queues;
2396 channels->other_count = 0;
2397 channels->combined_count = 0;
2398}
2399
2400static int ibmvnic_set_channels(struct net_device *netdev,
2401 struct ethtool_channels *channels)
2402{
2403 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2404 int ret;
2405
2406 ret = 0;
2407 adapter->desired.rx_queues = channels->rx_count;
2408 adapter->desired.tx_queues = channels->tx_count;
2409
2410 ret = wait_for_reset(adapter);
2411
2412 if (!ret &&
2413 (adapter->req_rx_queues != channels->rx_count ||
2414 adapter->req_tx_queues != channels->tx_count))
2415 netdev_info(netdev,
2416 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2417 channels->rx_count, channels->tx_count,
2418 adapter->req_rx_queues, adapter->req_tx_queues);
2419 return ret;
2421}
2422
2423static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2424{
2425 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2426 int i;
2427
2428 switch (stringset) {
2429 case ETH_SS_STATS:
2430 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2431 i++, data += ETH_GSTRING_LEN)
2432 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2433
2434 for (i = 0; i < adapter->req_tx_queues; i++) {
2435 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2436 data += ETH_GSTRING_LEN;
2437
2438 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2439 data += ETH_GSTRING_LEN;
2440
2441 snprintf(data, ETH_GSTRING_LEN,
2442 "tx%d_dropped_packets", i);
2443 data += ETH_GSTRING_LEN;
2444 }
2445
2446 for (i = 0; i < adapter->req_rx_queues; i++) {
2447 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2448 data += ETH_GSTRING_LEN;
2449
2450 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2451 data += ETH_GSTRING_LEN;
2452
2453 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2454 data += ETH_GSTRING_LEN;
2455 }
2456 break;
2457
2458 case ETH_SS_PRIV_FLAGS:
2459 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2460 strcpy(data + i * ETH_GSTRING_LEN,
2461 ibmvnic_priv_flags[i]);
2462 break;
2463 default:
2464 return;
2465 }
2466}
2467
2468static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2469{
2470 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2471
2472 switch (sset) {
2473 case ETH_SS_STATS:
2474 return ARRAY_SIZE(ibmvnic_stats) +
2475 adapter->req_tx_queues * NUM_TX_STATS +
2476 adapter->req_rx_queues * NUM_RX_STATS;
2477 case ETH_SS_PRIV_FLAGS:
2478 return ARRAY_SIZE(ibmvnic_priv_flags);
2479 default:
2480 return -EOPNOTSUPP;
2481 }
2482}
2483
2484static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2485 struct ethtool_stats *stats, u64 *data)
2486{
2487 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2488 union ibmvnic_crq crq;
2489 int i, j;
2490 int rc;
2491
2492 memset(&crq, 0, sizeof(crq));
2493 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2494 crq.request_statistics.cmd = REQUEST_STATISTICS;
2495 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2496 crq.request_statistics.len =
2497 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2498
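	/* Ask the server to fill the statistics buffer and wait for the
	 * response before reading the values out below.
	 */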
2500 init_completion(&adapter->stats_done);
2501 rc = ibmvnic_send_crq(adapter, &crq);
2502 if (rc)
2503 return;
2504 wait_for_completion(&adapter->stats_done);
2505
2506 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2507 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2508 ibmvnic_stats[i].offset));
2509
2510 for (j = 0; j < adapter->req_tx_queues; j++) {
2511 data[i] = adapter->tx_stats_buffers[j].packets;
2512 i++;
2513 data[i] = adapter->tx_stats_buffers[j].bytes;
2514 i++;
2515 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2516 i++;
2517 }
2518
2519 for (j = 0; j < adapter->req_rx_queues; j++) {
2520 data[i] = adapter->rx_stats_buffers[j].packets;
2521 i++;
2522 data[i] = adapter->rx_stats_buffers[j].bytes;
2523 i++;
2524 data[i] = adapter->rx_stats_buffers[j].interrupts;
2525 i++;
2526 }
2527}
2528
2529static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2530{
2531 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2532
2533 return adapter->priv_flags;
2534}
2535
2536static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2537{
2538 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2539 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2540
2541 if (which_maxes)
2542 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2543 else
2544 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2545
2546 return 0;
2547}
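
/* Userspace reaches these handlers through the standard ethtool
 * interface, e.g. "ethtool -S <if>" for the statistics strings and
 * values above, "ethtool -G <if>" for ring sizes, and "ethtool -L <if>"
 * for channel counts; the set paths renegotiate with the server
 * through a CHANGE_PARAM reset in wait_for_reset().
 */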
2548static const struct ethtool_ops ibmvnic_ethtool_ops = {
2549 .get_drvinfo = ibmvnic_get_drvinfo,
2550 .get_msglevel = ibmvnic_get_msglevel,
2551 .set_msglevel = ibmvnic_set_msglevel,
2552 .get_link = ibmvnic_get_link,
2553 .get_ringparam = ibmvnic_get_ringparam,
2554 .set_ringparam = ibmvnic_set_ringparam,
2555 .get_channels = ibmvnic_get_channels,
2556 .set_channels = ibmvnic_set_channels,
2557 .get_strings = ibmvnic_get_strings,
2558 .get_sset_count = ibmvnic_get_sset_count,
2559 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2560 .get_link_ksettings = ibmvnic_get_link_ksettings,
2561 .get_priv_flags = ibmvnic_get_priv_flags,
2562 .set_priv_flags = ibmvnic_set_priv_flags,
2563};
2564
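/* Routines for managing sub-CRQs and their interrupts */
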
2567static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2568 struct ibmvnic_sub_crq_queue *scrq)
2569{
2570 int rc;
2571
2572 if (scrq->irq) {
2573 free_irq(scrq->irq, scrq);
2574 irq_dispose_mapping(scrq->irq);
2575 scrq->irq = 0;
2576 }
2577
2578 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2579 atomic_set(&scrq->used, 0);
2580 scrq->cur = 0;
2581
2582 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2583 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2584 return rc;
2585}
2586
2587static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2588{
2589 int i, rc;
2590
2591 for (i = 0; i < adapter->req_tx_queues; i++) {
2592 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2593 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2594 if (rc)
2595 return rc;
2596 }
2597
2598 for (i = 0; i < adapter->req_rx_queues; i++) {
2599 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2600 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2601 if (rc)
2602 return rc;
2603 }
2604
	return 0;
2606}
2607
2608static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2609 struct ibmvnic_sub_crq_queue *scrq,
2610 bool do_h_free)
2611{
2612 struct device *dev = &adapter->vdev->dev;
2613 long rc;
2614
2615 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2616
2617 if (do_h_free) {
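		/* Free the sub-CRQ with the hypervisor, retrying while it
		 * reports busy.
		 */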
2619 do {
2620 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2621 adapter->vdev->unit_address,
2622 scrq->crq_num);
2623 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2624
2625 if (rc) {
2626 netdev_err(adapter->netdev,
2627 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2628 scrq->crq_num, rc);
2629 }
2630 }
2631
2632 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2633 DMA_BIDIRECTIONAL);
2634 free_pages((unsigned long)scrq->msgs, 2);
2635 kfree(scrq);
2636}
2637
2638static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2639 *adapter)
2640{
2641 struct device *dev = &adapter->vdev->dev;
2642 struct ibmvnic_sub_crq_queue *scrq;
2643 int rc;
2644
2645 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2646 if (!scrq)
2647 return NULL;
2648
2649 scrq->msgs =
2650 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2651 if (!scrq->msgs) {
2652 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2653 goto zero_page_failed;
2654 }
2655
2656 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2657 DMA_BIDIRECTIONAL);
2658 if (dma_mapping_error(dev, scrq->msg_token)) {
2659 dev_warn(dev, "Couldn't map crq queue messages page\n");
2660 goto map_failed;
2661 }
2662
2663 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2664 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2665
2666 if (rc == H_RESOURCE)
2667 rc = ibmvnic_reset_crq(adapter);
2668
2669 if (rc == H_CLOSED) {
2670 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2671 } else if (rc) {
2672 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2673 goto reg_failed;
2674 }
2675
2676 scrq->adapter = adapter;
2677 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2678 spin_lock_init(&scrq->lock);
2679
2680 netdev_dbg(adapter->netdev,
2681 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2682 scrq->crq_num, scrq->hw_irq, scrq->irq);
2683
2684 return scrq;
2685
2686reg_failed:
2687 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2688 DMA_BIDIRECTIONAL);
2689map_failed:
2690 free_pages((unsigned long)scrq->msgs, 2);
2691zero_page_failed:
2692 kfree(scrq);
2693
2694 return NULL;
2695}
2696
2697static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2698{
2699 int i;
2700
2701 if (adapter->tx_scrq) {
2702 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2703 if (!adapter->tx_scrq[i])
2704 continue;
2705
2706 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2707 i);
2708 if (adapter->tx_scrq[i]->irq) {
2709 free_irq(adapter->tx_scrq[i]->irq,
2710 adapter->tx_scrq[i]);
2711 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2712 adapter->tx_scrq[i]->irq = 0;
2713 }
2714
2715 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2716 do_h_free);
2717 }
2718
2719 kfree(adapter->tx_scrq);
2720 adapter->tx_scrq = NULL;
2721 adapter->num_active_tx_scrqs = 0;
2722 }
2723
2724 if (adapter->rx_scrq) {
2725 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2726 if (!adapter->rx_scrq[i])
2727 continue;
2728
2729 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2730 i);
2731 if (adapter->rx_scrq[i]->irq) {
2732 free_irq(adapter->rx_scrq[i]->irq,
2733 adapter->rx_scrq[i]);
2734 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2735 adapter->rx_scrq[i]->irq = 0;
2736 }
2737
2738 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2739 do_h_free);
2740 }
2741
2742 kfree(adapter->rx_scrq);
2743 adapter->rx_scrq = NULL;
2744 adapter->num_active_rx_scrqs = 0;
2745 }
2746}
2747
2748static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2749 struct ibmvnic_sub_crq_queue *scrq)
2750{
2751 struct device *dev = &adapter->vdev->dev;
2752 unsigned long rc;
2753
2754 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2755 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2756 if (rc)
2757 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2758 scrq->hw_irq, rc);
2759 return rc;
2760}
2761
2762static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2763 struct ibmvnic_sub_crq_queue *scrq)
2764{
2765 struct device *dev = &adapter->vdev->dev;
2766 unsigned long rc;
2767
2768 if (scrq->hw_irq > 0x100000000ULL) {
2769 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2770 return 1;
2771 }
2772
2773 if (adapter->resetting &&
2774 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2775 u64 val = (0xff000000) | scrq->hw_irq;
2776
2777 rc = plpar_hcall_norets(H_EOI, val);
2778 if (rc)
2779 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2780 val, rc);
2781 }
2782
2783 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2784 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2785 if (rc)
2786 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2787 scrq->hw_irq, rc);
2788 return rc;
2789}
2790
2791static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2792 struct ibmvnic_sub_crq_queue *scrq)
2793{
2794 struct device *dev = &adapter->vdev->dev;
2795 struct ibmvnic_tx_pool *tx_pool;
2796 struct ibmvnic_tx_buff *txbuff;
2797 union sub_crq *next;
2798 int index;
2799 int i, j;
2800
2801restart_loop:
2802 while (pending_scrq(adapter, scrq)) {
2803 unsigned int pool = scrq->pool_index;
2804 int num_entries = 0;
2805
2806 next = ibmvnic_next_scrq(adapter, scrq);
2807 for (i = 0; i < next->tx_comp.num_comps; i++) {
2808 if (next->tx_comp.rcs[i]) {
2809 dev_err(dev, "tx error %x\n",
2810 next->tx_comp.rcs[i]);
2811 continue;
2812 }
2813 index = be32_to_cpu(next->tx_comp.correlators[i]);
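			/* A flag bit in the correlator selects whether the
			 * buffer came from the TSO pool or the normal pool.
			 */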
2814 if (index & IBMVNIC_TSO_POOL_MASK) {
2815 tx_pool = &adapter->tso_pool[pool];
2816 index &= ~IBMVNIC_TSO_POOL_MASK;
2817 } else {
2818 tx_pool = &adapter->tx_pool[pool];
2819 }
2820
2821 txbuff = &tx_pool->tx_buff[index];
2822
2823 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2824 if (!txbuff->data_dma[j])
2825 continue;
2826
2827 txbuff->data_dma[j] = 0;
2828 }
2829
2830 if (txbuff->last_frag) {
2831 dev_kfree_skb_any(txbuff->skb);
2832 txbuff->skb = NULL;
2833 }
2834
2835 num_entries += txbuff->num_entries;
2836
2837 tx_pool->free_map[tx_pool->producer_index] = index;
2838 tx_pool->producer_index =
2839 (tx_pool->producer_index + 1) %
2840 tx_pool->num_buffers;
2841 }
2842
2843 next->tx_comp.first = 0;
2844
2845 if (atomic_sub_return(num_entries, &scrq->used) <=
2846 (adapter->req_tx_entries_per_subcrq / 2) &&
2847 __netif_subqueue_stopped(adapter->netdev,
2848 scrq->pool_index)) {
2849 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2850 netdev_dbg(adapter->netdev, "Started queue %d\n",
2851 scrq->pool_index);
2852 }
2853 }
2854
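	/* Re-enable the irq, then recheck for completions that raced
	 * with the enable.
	 */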
2855 enable_scrq_irq(adapter, scrq);
2856
2857 if (pending_scrq(adapter, scrq)) {
2858 disable_scrq_irq(adapter, scrq);
2859 goto restart_loop;
2860 }
2861
2862 return 0;
2863}
2864
2865static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2866{
2867 struct ibmvnic_sub_crq_queue *scrq = instance;
2868 struct ibmvnic_adapter *adapter = scrq->adapter;
2869
2870 disable_scrq_irq(adapter, scrq);
2871 ibmvnic_complete_tx(adapter, scrq);
2872
2873 return IRQ_HANDLED;
2874}
2875
2876static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2877{
2878 struct ibmvnic_sub_crq_queue *scrq = instance;
2879 struct ibmvnic_adapter *adapter = scrq->adapter;
2880
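	/* Interrupts can arrive before initialization completes (e.g.
	 * when booting a kdump kernel), so ignore them until the device
	 * is open.
	 */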
2884 if (unlikely(adapter->state != VNIC_OPEN))
2885 return IRQ_NONE;
2886
2887 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2888
2889 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2890 disable_scrq_irq(adapter, scrq);
2891 __napi_schedule(&adapter->napi[scrq->scrq_num]);
2892 }
2893
2894 return IRQ_HANDLED;
2895}
2896
2897static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2898{
2899 struct device *dev = &adapter->vdev->dev;
2900 struct ibmvnic_sub_crq_queue *scrq;
2901 int i = 0, j = 0;
2902 int rc = 0;
2903
2904 for (i = 0; i < adapter->req_tx_queues; i++) {
2905 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2906 i);
2907 scrq = adapter->tx_scrq[i];
2908 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2909
2910 if (!scrq->irq) {
2911 rc = -EINVAL;
2912 dev_err(dev, "Error mapping irq\n");
2913 goto req_tx_irq_failed;
2914 }
2915
2916 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
2917 adapter->vdev->unit_address, i);
2918 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2919 0, scrq->name, scrq);
2920
2921 if (rc) {
2922 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2923 scrq->irq, rc);
2924 irq_dispose_mapping(scrq->irq);
2925 goto req_tx_irq_failed;
2926 }
2927 }
2928
2929 for (i = 0; i < adapter->req_rx_queues; i++) {
2930 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2931 i);
2932 scrq = adapter->rx_scrq[i];
2933 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2934 if (!scrq->irq) {
2935 rc = -EINVAL;
2936 dev_err(dev, "Error mapping irq\n");
2937 goto req_rx_irq_failed;
2938 }
2939 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
2940 adapter->vdev->unit_address, i);
2941 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2942 0, scrq->name, scrq);
2943 if (rc) {
2944 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2945 scrq->irq, rc);
2946 irq_dispose_mapping(scrq->irq);
2947 goto req_rx_irq_failed;
2948 }
2949 }
2950 return rc;
2951
2952req_rx_irq_failed:
2953 for (j = 0; j < i; j++) {
2954 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2955 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2956 }
2957 i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, true);
2964 return rc;
2965}
2966
2967static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2968{
2969 struct device *dev = &adapter->vdev->dev;
2970 struct ibmvnic_sub_crq_queue **allqueues;
2971 int registered_queues = 0;
2972 int total_queues;
2973 int more = 0;
2974 int i;
2975
2976 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2977
2978 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2979 if (!allqueues)
		return -ENOMEM;
2981
2982 for (i = 0; i < total_queues; i++) {
2983 allqueues[i] = init_sub_crq_queue(adapter);
2984 if (!allqueues[i]) {
2985 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2986 break;
2987 }
2988 registered_queues++;
2989 }
2990
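	/* Make sure we were able to register the minimum number of
	 * queues.
	 */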
2992 if (registered_queues <
2993 adapter->min_tx_queues + adapter->min_rx_queues) {
2994 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2995 goto tx_failed;
2996 }
2997
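	/* Shrink the requested rx/tx queue counts, alternating between
	 * them, until the request fits what was actually registered.
	 */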
2999 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3000 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3001 switch (i % 3) {
3002 case 0:
3003 if (adapter->req_rx_queues > adapter->min_rx_queues)
3004 adapter->req_rx_queues--;
3005 else
3006 more++;
3007 break;
3008 case 1:
3009 if (adapter->req_tx_queues > adapter->min_tx_queues)
3010 adapter->req_tx_queues--;
3011 else
3012 more++;
3013 break;
3014 }
3015 }
3016
3017 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3018 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3019 if (!adapter->tx_scrq)
3020 goto tx_failed;
3021
3022 for (i = 0; i < adapter->req_tx_queues; i++) {
3023 adapter->tx_scrq[i] = allqueues[i];
3024 adapter->tx_scrq[i]->pool_index = i;
3025 adapter->num_active_tx_scrqs++;
3026 }
3027
3028 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3029 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3030 if (!adapter->rx_scrq)
3031 goto rx_failed;
3032
3033 for (i = 0; i < adapter->req_rx_queues; i++) {
3034 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3035 adapter->rx_scrq[i]->scrq_num = i;
3036 adapter->num_active_rx_scrqs++;
3037 }
3038
3039 kfree(allqueues);
3040 return 0;
3041
3042rx_failed:
3043 kfree(adapter->tx_scrq);
3044 adapter->tx_scrq = NULL;
3045tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], true);
3048 kfree(allqueues);
	return -ENOMEM;
3050}
3051
3052static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3053{
3054 struct device *dev = &adapter->vdev->dev;
3055 union ibmvnic_crq crq;
3056 int max_entries;
3057
3058 if (!retry) {
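		/* Sub-CRQ entries are 32 bytes long (4 * sizeof(u64)) */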
3060 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3061
3062 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3063 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3064 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3065 return;
3066 }
3067
3068 if (adapter->desired.mtu)
3069 adapter->req_mtu = adapter->desired.mtu;
3070 else
3071 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3072
3073 if (!adapter->desired.tx_entries)
3074 adapter->desired.tx_entries =
3075 adapter->max_tx_entries_per_subcrq;
3076 if (!adapter->desired.rx_entries)
3077 adapter->desired.rx_entries =
3078 adapter->max_rx_add_entries_per_subcrq;
3079
3080 max_entries = IBMVNIC_MAX_LTB_SIZE /
3081 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3082
3083 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3084 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3085 adapter->desired.tx_entries = max_entries;
3086 }
3087
3088 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3089 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3090 adapter->desired.rx_entries = max_entries;
3091 }
3092
3093 if (adapter->desired.tx_entries)
3094 adapter->req_tx_entries_per_subcrq =
3095 adapter->desired.tx_entries;
3096 else
3097 adapter->req_tx_entries_per_subcrq =
3098 adapter->max_tx_entries_per_subcrq;
3099
3100 if (adapter->desired.rx_entries)
3101 adapter->req_rx_add_entries_per_subcrq =
3102 adapter->desired.rx_entries;
3103 else
3104 adapter->req_rx_add_entries_per_subcrq =
3105 adapter->max_rx_add_entries_per_subcrq;
3106
3107 if (adapter->desired.tx_queues)
3108 adapter->req_tx_queues =
3109 adapter->desired.tx_queues;
3110 else
3111 adapter->req_tx_queues =
3112 adapter->opt_tx_comp_sub_queues;
3113
3114 if (adapter->desired.rx_queues)
3115 adapter->req_rx_queues =
3116 adapter->desired.rx_queues;
3117 else
3118 adapter->req_rx_queues =
3119 adapter->opt_rx_comp_queues;
3120
3121 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3122 }
3123
3124 memset(&crq, 0, sizeof(crq));
3125 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3126 crq.request_capability.cmd = REQUEST_CAPABILITY;
3127
3128 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3129 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3130 atomic_inc(&adapter->running_cap_crqs);
3131 ibmvnic_send_crq(adapter, &crq);
3132
3133 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3134 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3135 atomic_inc(&adapter->running_cap_crqs);
3136 ibmvnic_send_crq(adapter, &crq);
3137
3138 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3139 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3140 atomic_inc(&adapter->running_cap_crqs);
3141 ibmvnic_send_crq(adapter, &crq);
3142
3143 crq.request_capability.capability =
3144 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3145 crq.request_capability.number =
3146 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3147 atomic_inc(&adapter->running_cap_crqs);
3148 ibmvnic_send_crq(adapter, &crq);
3149
3150 crq.request_capability.capability =
3151 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3152 crq.request_capability.number =
3153 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3154 atomic_inc(&adapter->running_cap_crqs);
3155 ibmvnic_send_crq(adapter, &crq);
3156
3157 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3158 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3159 atomic_inc(&adapter->running_cap_crqs);
3160 ibmvnic_send_crq(adapter, &crq);
3161
3162 if (adapter->netdev->flags & IFF_PROMISC) {
3163 if (adapter->promisc_supported) {
3164 crq.request_capability.capability =
3165 cpu_to_be16(PROMISC_REQUESTED);
3166 crq.request_capability.number = cpu_to_be64(1);
3167 atomic_inc(&adapter->running_cap_crqs);
3168 ibmvnic_send_crq(adapter, &crq);
3169 }
3170 } else {
3171 crq.request_capability.capability =
3172 cpu_to_be16(PROMISC_REQUESTED);
3173 crq.request_capability.number = cpu_to_be64(0);
3174 atomic_inc(&adapter->running_cap_crqs);
3175 ibmvnic_send_crq(adapter, &crq);
3176 }
3177}
3178
3179static int pending_scrq(struct ibmvnic_adapter *adapter,
3180 struct ibmvnic_sub_crq_queue *scrq)
3181{
3182 union sub_crq *entry = &scrq->msgs[scrq->cur];
3183
	return !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3188}
3189
3190static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3191 struct ibmvnic_sub_crq_queue *scrq)
3192{
3193 union sub_crq *entry;
3194 unsigned long flags;
3195
3196 spin_lock_irqsave(&scrq->lock, flags);
3197 entry = &scrq->msgs[scrq->cur];
3198 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3199 if (++scrq->cur == scrq->size)
3200 scrq->cur = 0;
3201 } else {
3202 entry = NULL;
3203 }
3204 spin_unlock_irqrestore(&scrq->lock, flags);
3205
3206 return entry;
3207}
3208
3209static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3210{
3211 struct ibmvnic_crq_queue *queue = &adapter->crq;
3212 union ibmvnic_crq *crq;
3213
3214 crq = &queue->msgs[queue->cur];
3215 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3216 if (++queue->cur == queue->size)
3217 queue->cur = 0;
3218 } else {
3219 crq = NULL;
3220 }
3221
3222 return crq;
3223}
3224
3225static void print_subcrq_error(struct device *dev, int rc, const char *func)
3226{
3227 switch (rc) {
3228 case H_PARAMETER:
3229 dev_warn_ratelimited(dev,
3230 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3231 func, rc);
3232 break;
3233 case H_CLOSED:
3234 dev_warn_ratelimited(dev,
3235 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3236 func, rc);
3237 break;
3238 default:
3239 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3240 break;
3241 }
3242}
3243
3244static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3245 union sub_crq *sub_crq)
3246{
3247 unsigned int ua = adapter->vdev->unit_address;
3248 struct device *dev = &adapter->vdev->dev;
3249 u64 *u64_crq = (u64 *)sub_crq;
3250 int rc;
3251
3252 netdev_dbg(adapter->netdev,
3253 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3254 (unsigned long int)cpu_to_be64(remote_handle),
3255 (unsigned long int)cpu_to_be64(u64_crq[0]),
3256 (unsigned long int)cpu_to_be64(u64_crq[1]),
3257 (unsigned long int)cpu_to_be64(u64_crq[2]),
3258 (unsigned long int)cpu_to_be64(u64_crq[3]));
3259
3260
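	/* Make sure the hypervisor sees the complete request */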
3261 mb();
3262
3263 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3264 cpu_to_be64(remote_handle),
3265 cpu_to_be64(u64_crq[0]),
3266 cpu_to_be64(u64_crq[1]),
3267 cpu_to_be64(u64_crq[2]),
3268 cpu_to_be64(u64_crq[3]));
3269
3270 if (rc)
3271 print_subcrq_error(dev, rc, __func__);
3272
3273 return rc;
3274}
3275
3276static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3277 u64 remote_handle, u64 ioba, u64 num_entries)
3278{
3279 unsigned int ua = adapter->vdev->unit_address;
3280 struct device *dev = &adapter->vdev->dev;
3281 int rc;
3282
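	/* Make sure the hypervisor sees the complete request */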
3284 mb();
3285 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3286 cpu_to_be64(remote_handle),
3287 ioba, num_entries);
3288
3289 if (rc)
3290 print_subcrq_error(dev, rc, __func__);
3291
3292 return rc;
3293}
3294
3295static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3296 union ibmvnic_crq *crq)
3297{
3298 unsigned int ua = adapter->vdev->unit_address;
3299 struct device *dev = &adapter->vdev->dev;
3300 u64 *u64_crq = (u64 *)crq;
3301 int rc;
3302
3303 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3304 (unsigned long int)cpu_to_be64(u64_crq[0]),
3305 (unsigned long int)cpu_to_be64(u64_crq[1]));
3306
3307 if (!adapter->crq.active &&
3308 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3309 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3310 return -EINVAL;
3311 }
3312
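	/* Make sure the hypervisor sees the complete request */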
3314 mb();
3315
3316 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3317 cpu_to_be64(u64_crq[0]),
3318 cpu_to_be64(u64_crq[1]));
3319
3320 if (rc) {
3321 if (rc == H_CLOSED) {
3322 dev_warn(dev, "CRQ Queue closed\n");
3323 if (adapter->resetting)
3324 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3325 }
3326
3327 dev_warn(dev, "Send error (rc=%d)\n", rc);
3328 }
3329
3330 return rc;
3331}
3332
3333static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3334{
3335 union ibmvnic_crq crq;
3336
3337 memset(&crq, 0, sizeof(crq));
3338 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3339 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3340 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3341
3342 return ibmvnic_send_crq(adapter, &crq);
3343}
3344
3345static int send_version_xchg(struct ibmvnic_adapter *adapter)
3346{
3347 union ibmvnic_crq crq;
3348
3349 memset(&crq, 0, sizeof(crq));
3350 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3351 crq.version_exchange.cmd = VERSION_EXCHANGE;
3352 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3353
3354 return ibmvnic_send_crq(adapter, &crq);
3355}
3356
3357struct vnic_login_client_data {
3358 u8 type;
3359 __be16 len;
3360 char name[];
3361} __packed;
3362
3363static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3364{
3365 int len;
3366
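	/* The client data holds four vnic_login_client_data headers
	 * (OS name, LPAR hostname, device name, and a terminating entry)
	 * plus the NUL-terminated strings themselves; the fixed 6 bytes
	 * cover "Linux" and its NUL.
	 */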
3371 len = 4 * sizeof(struct vnic_login_client_data);
3372 len += 6;
3373 len += strlen(utsname()->nodename) + 1;
3374 len += strlen(adapter->netdev->name) + 1;
3375
3376 return len;
3377}
3378
3379static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3380 struct vnic_login_client_data *vlcd)
3381{
3382 const char *os_name = "Linux";
3383 int len;
3384
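	/* Type 1 - LPAR OS */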
3386 vlcd->type = 1;
3387 len = strlen(os_name) + 1;
3388 vlcd->len = cpu_to_be16(len);
3389 strncpy(vlcd->name, os_name, len);
3390 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3391
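	/* Type 2 - LPAR hostname */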
3393 vlcd->type = 2;
3394 len = strlen(utsname()->nodename) + 1;
3395 vlcd->len = cpu_to_be16(len);
3396 strncpy(vlcd->name, utsname()->nodename, len);
3397 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3398
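	/* Type 3 - device name */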
3400 vlcd->type = 3;
3401 len = strlen(adapter->netdev->name) + 1;
3402 vlcd->len = cpu_to_be16(len);
3403 strncpy(vlcd->name, adapter->netdev->name, len);
3404}
3405
3406static int send_login(struct ibmvnic_adapter *adapter)
3407{
3408 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3409 struct ibmvnic_login_buffer *login_buffer;
3410 struct device *dev = &adapter->vdev->dev;
3411 dma_addr_t rsp_buffer_token;
3412 dma_addr_t buffer_token;
3413 size_t rsp_buffer_size;
3414 union ibmvnic_crq crq;
3415 size_t buffer_size;
3416 __be64 *tx_list_p;
3417 __be64 *rx_list_p;
3418 int client_data_len;
3419 struct vnic_login_client_data *vlcd;
3420 int i;
3421
3422 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3423 netdev_err(adapter->netdev,
3424 "RX or TX queues are not allocated, device login failed\n");
		return -EINVAL;
3426 }
3427
3428 release_login_rsp_buffer(adapter);
3429 client_data_len = vnic_client_data_len(adapter);
3430
3431 buffer_size =
3432 sizeof(struct ibmvnic_login_buffer) +
3433 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3434 client_data_len;
3435
3436 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3437 if (!login_buffer)
3438 goto buf_alloc_failed;
3439
3440 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3441 DMA_TO_DEVICE);
3442 if (dma_mapping_error(dev, buffer_token)) {
3443 dev_err(dev, "Couldn't map login buffer\n");
3444 goto buf_map_failed;
3445 }
3446
3447 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3448 sizeof(u64) * adapter->req_tx_queues +
3449 sizeof(u64) * adapter->req_rx_queues +
3450 sizeof(u64) * adapter->req_rx_queues +
3451 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3452
3453 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3454 if (!login_rsp_buffer)
3455 goto buf_rsp_alloc_failed;
3456
3457 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3458 rsp_buffer_size, DMA_FROM_DEVICE);
3459 if (dma_mapping_error(dev, rsp_buffer_token)) {
3460 dev_err(dev, "Couldn't map login rsp buffer\n");
3461 goto buf_rsp_map_failed;
3462 }
3463
3464 adapter->login_buf = login_buffer;
3465 adapter->login_buf_token = buffer_token;
3466 adapter->login_buf_sz = buffer_size;
3467 adapter->login_rsp_buf = login_rsp_buffer;
3468 adapter->login_rsp_buf_token = rsp_buffer_token;
3469 adapter->login_rsp_buf_sz = rsp_buffer_size;
3470
3471 login_buffer->len = cpu_to_be32(buffer_size);
3472 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3473 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3474 login_buffer->off_txcomp_subcrqs =
3475 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3476 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3477 login_buffer->off_rxcomp_subcrqs =
3478 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3479 sizeof(u64) * adapter->req_tx_queues);
3480 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3481 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3482
3483 tx_list_p = (__be64 *)((char *)login_buffer +
3484 sizeof(struct ibmvnic_login_buffer));
3485 rx_list_p = (__be64 *)((char *)login_buffer +
3486 sizeof(struct ibmvnic_login_buffer) +
3487 sizeof(u64) * adapter->req_tx_queues);
3488
	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i])
			tx_list_p[i] =
				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
	}
3495
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i])
			rx_list_p[i] =
				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
	}
3502
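	/* Insert the vNIC login client data after the queue lists */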
3504 vlcd = (struct vnic_login_client_data *)
3505 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3506 login_buffer->client_data_offset =
3507 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3508 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3509
3510 vnic_add_client_data(adapter, vlcd);
3511
3512 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3513 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3514 netdev_dbg(adapter->netdev, "%016lx\n",
3515 ((unsigned long int *)(adapter->login_buf))[i]);
3516 }
3517
3518 memset(&crq, 0, sizeof(crq));
3519 crq.login.first = IBMVNIC_CRQ_CMD;
3520 crq.login.cmd = LOGIN;
3521 crq.login.ioba = cpu_to_be32(buffer_token);
3522 crq.login.len = cpu_to_be32(buffer_size);
3523 ibmvnic_send_crq(adapter, &crq);
3524
3525 return 0;
3526
3527buf_rsp_map_failed:
3528 kfree(login_rsp_buffer);
3529buf_rsp_alloc_failed:
3530 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3531buf_map_failed:
3532 kfree(login_buffer);
3533buf_alloc_failed:
	return -ENOMEM;
3535}
3536
3537static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3538 u32 len, u8 map_id)
3539{
3540 union ibmvnic_crq crq;
3541
3542 memset(&crq, 0, sizeof(crq));
3543 crq.request_map.first = IBMVNIC_CRQ_CMD;
3544 crq.request_map.cmd = REQUEST_MAP;
3545 crq.request_map.map_id = map_id;
3546 crq.request_map.ioba = cpu_to_be32(addr);
3547 crq.request_map.len = cpu_to_be32(len);
3548 return ibmvnic_send_crq(adapter, &crq);
3549}
3550
3551static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3552{
3553 union ibmvnic_crq crq;
3554
3555 memset(&crq, 0, sizeof(crq));
3556 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3557 crq.request_unmap.cmd = REQUEST_UNMAP;
3558 crq.request_unmap.map_id = map_id;
3559 return ibmvnic_send_crq(adapter, &crq);
3560}
3561
3562static void send_map_query(struct ibmvnic_adapter *adapter)
3563{
3564 union ibmvnic_crq crq;
3565
3566 memset(&crq, 0, sizeof(crq));
3567 crq.query_map.first = IBMVNIC_CRQ_CMD;
3568 crq.query_map.cmd = QUERY_MAP;
3569 ibmvnic_send_crq(adapter, &crq);
3570}
3571
3573static void send_cap_queries(struct ibmvnic_adapter *adapter)
3574{
3575 union ibmvnic_crq crq;
3576
3577 atomic_set(&adapter->running_cap_crqs, 0);
3578 memset(&crq, 0, sizeof(crq));
3579 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3580 crq.query_capability.cmd = QUERY_CAPABILITY;
3581
3582 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3583 atomic_inc(&adapter->running_cap_crqs);
3584 ibmvnic_send_crq(adapter, &crq);
3585
3586 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3587 atomic_inc(&adapter->running_cap_crqs);
3588 ibmvnic_send_crq(adapter, &crq);
3589
3590 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3591 atomic_inc(&adapter->running_cap_crqs);
3592 ibmvnic_send_crq(adapter, &crq);
3593
3594 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3595 atomic_inc(&adapter->running_cap_crqs);
3596 ibmvnic_send_crq(adapter, &crq);
3597
3598 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3599 atomic_inc(&adapter->running_cap_crqs);
3600 ibmvnic_send_crq(adapter, &crq);
3601
3602 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3603 atomic_inc(&adapter->running_cap_crqs);
3604 ibmvnic_send_crq(adapter, &crq);
3605
3606 crq.query_capability.capability =
3607 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3608 atomic_inc(&adapter->running_cap_crqs);
3609 ibmvnic_send_crq(adapter, &crq);
3610
3611 crq.query_capability.capability =
3612 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3613 atomic_inc(&adapter->running_cap_crqs);
3614 ibmvnic_send_crq(adapter, &crq);
3615
3616 crq.query_capability.capability =
3617 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3618 atomic_inc(&adapter->running_cap_crqs);
3619 ibmvnic_send_crq(adapter, &crq);
3620
3621 crq.query_capability.capability =
3622 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3623 atomic_inc(&adapter->running_cap_crqs);
3624 ibmvnic_send_crq(adapter, &crq);
3625
3626 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3627 atomic_inc(&adapter->running_cap_crqs);
3628 ibmvnic_send_crq(adapter, &crq);
3629
3630 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3631 atomic_inc(&adapter->running_cap_crqs);
3632 ibmvnic_send_crq(adapter, &crq);
3633
3634 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3635 atomic_inc(&adapter->running_cap_crqs);
3636 ibmvnic_send_crq(adapter, &crq);
3637
3638 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3639 atomic_inc(&adapter->running_cap_crqs);
3640 ibmvnic_send_crq(adapter, &crq);
3641
3642 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3643 atomic_inc(&adapter->running_cap_crqs);
3644 ibmvnic_send_crq(adapter, &crq);
3645
3646 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3647 atomic_inc(&adapter->running_cap_crqs);
3648 ibmvnic_send_crq(adapter, &crq);
3649
3650 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3651 atomic_inc(&adapter->running_cap_crqs);
3652 ibmvnic_send_crq(adapter, &crq);
3653
3654 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3655 atomic_inc(&adapter->running_cap_crqs);
3656 ibmvnic_send_crq(adapter, &crq);
3657
3658 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3659 atomic_inc(&adapter->running_cap_crqs);
3660 ibmvnic_send_crq(adapter, &crq);
3661
3662 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3663 atomic_inc(&adapter->running_cap_crqs);
3664 ibmvnic_send_crq(adapter, &crq);
3665
3666 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3667 atomic_inc(&adapter->running_cap_crqs);
3668 ibmvnic_send_crq(adapter, &crq);
3669
3670 crq.query_capability.capability =
3671 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3672 atomic_inc(&adapter->running_cap_crqs);
3673 ibmvnic_send_crq(adapter, &crq);
3674
3675 crq.query_capability.capability =
3676 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3677 atomic_inc(&adapter->running_cap_crqs);
3678 ibmvnic_send_crq(adapter, &crq);
3679
3680 crq.query_capability.capability =
3681 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3682 atomic_inc(&adapter->running_cap_crqs);
3683 ibmvnic_send_crq(adapter, &crq);
3684
3685 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3686 atomic_inc(&adapter->running_cap_crqs);
3687 ibmvnic_send_crq(adapter, &crq);
3688}
3689
3690static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3691 struct ibmvnic_adapter *adapter)
3692{
3693 struct device *dev = &adapter->vdev->dev;
3694
3695 if (crq->get_vpd_size_rsp.rc.code) {
3696 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3697 crq->get_vpd_size_rsp.rc.code);
3698 complete(&adapter->fw_done);
3699 return;
3700 }
3701
3702 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3703 complete(&adapter->fw_done);
3704}
3705
3706static void handle_vpd_rsp(union ibmvnic_crq *crq,
3707 struct ibmvnic_adapter *adapter)
3708{
3709 struct device *dev = &adapter->vdev->dev;
3710 unsigned char *substr = NULL;
3711 u8 fw_level_len = 0;
3712
	memset(adapter->fw_version, 0, sizeof(adapter->fw_version));
3714
3715 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3716 DMA_FROM_DEVICE);
3717
3718 if (crq->get_vpd_rsp.rc.code) {
3719 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3720 crq->get_vpd_rsp.rc.code);
3721 goto complete;
3722 }
3723
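	/* The firmware version info follows the ASCII "RM" substring in
	 * the VPD buffer.
	 */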
3727 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3728 if (!substr) {
3729 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3730 goto complete;
3731 }
3732
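	/* Get the length of the firmware level ASCII substring */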
3734 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3735 fw_level_len = *(substr + 2);
3736 } else {
3737 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
3738 goto complete;
3739 }
3740
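	/* Copy the firmware version string from the VPD into the adapter */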
3742 if ((substr + 3 + fw_level_len) <
3743 (adapter->vpd->buff + adapter->vpd->len)) {
3744 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3745 } else {
3746 dev_info(dev, "FW substr extrapolated VPD buff\n");
3747 }
3748
3749complete:
3750 if (adapter->fw_version[0] == '\0')
3751 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3752 complete(&adapter->fw_done);
3753}
3754
3755static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3756{
3757 struct device *dev = &adapter->vdev->dev;
3758 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3759 netdev_features_t old_hw_features = 0;
3760 union ibmvnic_crq crq;
3761 int i;
3762
3763 dma_unmap_single(dev, adapter->ip_offload_tok,
3764 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3765
3766 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3767 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3768 netdev_dbg(adapter->netdev, "%016lx\n",
3769 ((unsigned long int *)(buf))[i]);
3770
3771 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3772 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3773 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3774 buf->tcp_ipv4_chksum);
3775 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3776 buf->tcp_ipv6_chksum);
3777 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3778 buf->udp_ipv4_chksum);
3779 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3780 buf->udp_ipv6_chksum);
3781 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3782 buf->large_tx_ipv4);
3783 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3784 buf->large_tx_ipv6);
3785 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3786 buf->large_rx_ipv4);
3787 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3788 buf->large_rx_ipv6);
3789 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3790 buf->max_ipv4_header_size);
3791 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3792 buf->max_ipv6_header_size);
3793 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3794 buf->max_tcp_header_size);
3795 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3796 buf->max_udp_header_size);
3797 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3798 buf->max_large_tx_size);
3799 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3800 buf->max_large_rx_size);
3801 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3802 buf->ipv6_extension_header);
3803 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3804 buf->tcp_pseudosum_req);
3805 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3806 buf->num_ipv6_ext_headers);
3807 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3808 buf->off_ipv6_ext_headers);
3809
3810 adapter->ip_offload_ctrl_tok =
3811 dma_map_single(dev, &adapter->ip_offload_ctrl,
3812 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3813
3814 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3815 dev_err(dev, "Couldn't map ip offload control buffer\n");
3816 return;
3817 }
3818
3819 adapter->ip_offload_ctrl.len =
3820 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3821 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3822 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3823 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3824 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3825 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3826 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3827 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3828 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3829 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3830
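	/* large_rx disabled for now, additional features needed */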
3832 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3833 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3834
3835 if (adapter->state != VNIC_PROBING) {
3836 old_hw_features = adapter->netdev->hw_features;
3837 adapter->netdev->hw_features = 0;
3838 }
3839
3840 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3841
3842 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3843 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3844
3845 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3846 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3847
3848 if ((adapter->netdev->features &
3849 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3850 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3851
3852 if (buf->large_tx_ipv4)
3853 adapter->netdev->hw_features |= NETIF_F_TSO;
3854 if (buf->large_tx_ipv6)
3855 adapter->netdev->hw_features |= NETIF_F_TSO6;
3856
3857 if (adapter->state == VNIC_PROBING) {
3858 adapter->netdev->features |= adapter->netdev->hw_features;
3859 } else if (old_hw_features != adapter->netdev->hw_features) {
3860 netdev_features_t tmp = 0;
3861
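		/* disable features no longer supported */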
3863 adapter->netdev->features &= adapter->netdev->hw_features;
3864
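		/* turn on features now supported if previously enabled */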
3865 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3866 adapter->netdev->hw_features;
3867 adapter->netdev->features |=
3868 tmp & adapter->netdev->wanted_features;
3869 }
3870
3871 memset(&crq, 0, sizeof(crq));
3872 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3873 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3874 crq.control_ip_offload.len =
3875 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3876 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3877 ibmvnic_send_crq(adapter, &crq);
3878}
3879
3880static const char *ibmvnic_fw_err_cause(u16 cause)
3881{
3882 switch (cause) {
3883 case ADAPTER_PROBLEM:
3884 return "adapter problem";
3885 case BUS_PROBLEM:
3886 return "bus problem";
3887 case FW_PROBLEM:
3888 return "firmware problem";
3889 case DD_PROBLEM:
3890 return "device driver problem";
3891 case EEH_RECOVERY:
3892 return "EEH recovery";
3893 case FW_UPDATED:
3894 return "firmware updated";
3895 case LOW_MEMORY:
3896 return "low Memory";
3897 default:
3898 return "unknown";
3899 }
3900}
3901
3902static void handle_error_indication(union ibmvnic_crq *crq,
3903 struct ibmvnic_adapter *adapter)
3904{
3905 struct device *dev = &adapter->vdev->dev;
3906 u16 cause;
3907
3908 cause = be16_to_cpu(crq->error_indication.error_cause);
3909
3910 dev_warn_ratelimited(dev,
3911 "Firmware reports %serror, cause: %s. Starting recovery...\n",
3912 crq->error_indication.flags
3913 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3914 ibmvnic_fw_err_cause(cause));
3915
3916 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3917 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3918 else
3919 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3920}
3921
3922static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3923 struct ibmvnic_adapter *adapter)
3924{
3925 struct net_device *netdev = adapter->netdev;
3926 struct device *dev = &adapter->vdev->dev;
3927 long rc;
3928
3929 rc = crq->change_mac_addr_rsp.rc.code;
3930 if (rc) {
3931 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3932 goto out;
3933 }
3934 ether_addr_copy(netdev->dev_addr,
3935 &crq->change_mac_addr_rsp.mac_addr[0]);
3936out:
3937 complete(&adapter->fw_done);
3938 return rc;
3939}
3940
3941static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3942 struct ibmvnic_adapter *adapter)
3943{
3944 struct device *dev = &adapter->vdev->dev;
3945 u64 *req_value;
3946 char *name;
3947
3948 atomic_dec(&adapter->running_cap_crqs);
3949 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3950 case REQ_TX_QUEUES:
3951 req_value = &adapter->req_tx_queues;
3952 name = "tx";
3953 break;
3954 case REQ_RX_QUEUES:
3955 req_value = &adapter->req_rx_queues;
3956 name = "rx";
3957 break;
3958 case REQ_RX_ADD_QUEUES:
3959 req_value = &adapter->req_rx_add_queues;
3960 name = "rx_add";
3961 break;
3962 case REQ_TX_ENTRIES_PER_SUBCRQ:
3963 req_value = &adapter->req_tx_entries_per_subcrq;
3964 name = "tx_entries_per_subcrq";
3965 break;
3966 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3967 req_value = &adapter->req_rx_add_entries_per_subcrq;
3968 name = "rx_add_entries_per_subcrq";
3969 break;
3970 case REQ_MTU:
3971 req_value = &adapter->req_mtu;
3972 name = "mtu";
3973 break;
3974 case PROMISC_REQUESTED:
3975 req_value = &adapter->promisc;
3976 name = "promisc";
3977 break;
3978 default:
3979 dev_err(dev, "Got invalid cap request rsp %d\n",
3980 crq->request_capability.capability);
3981 return;
3982 }
3983
3984 switch (crq->request_capability_rsp.rc.code) {
3985 case SUCCESS:
3986 break;
3987 case PARTIALSUCCESS:
3988 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3989 *req_value,
3990 (long int)be64_to_cpu(crq->request_capability_rsp.
3991 number), name);
3992
3993 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3994 REQ_MTU) {
3995 pr_err("mtu of %llu is not supported. Reverting.\n",
3996 *req_value);
3997 *req_value = adapter->fallback.mtu;
3998 } else {
3999 *req_value =
4000 be64_to_cpu(crq->request_capability_rsp.number);
4001 }
4002
4003 ibmvnic_send_req_caps(adapter, 1);
4004 return;
4005 default:
4006 dev_err(dev, "Error %d in request cap rsp\n",
4007 crq->request_capability_rsp.rc.code);
4008 return;
4009 }
4010
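	/* Done receiving requested capabilities, query IP offload support */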
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
			&adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
			cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

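	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */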
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

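	/* Sanity checks */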
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBP:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	default:
		netdev_warn(netdev, "Unknown speed 0x%08x\n",
			    be32_to_cpu(crq->query_phys_parms_rsp.speed));
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)be64_to_cpu(u64_crq[0]),
		   (unsigned long int)be64_to_cpu(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
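			/* The adapter lost the connection */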
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
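		/* Pull all the valid messages off the CRQ */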
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

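		/* remain in tasklet until all
		 * capabilities responses are received
		 */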
		if (!adapter->wait_capability)
			done = true;
	}
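	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */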
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

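	/* Close the CRQ */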
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

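	/* Clean out the queue */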
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

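	/* And re-open it again */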
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
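		/* Adapter is good, but other end is not ready */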
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
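	/* Should we allocate more than one page? */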

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
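		/* maybe kexecing and resource is busy. try a reset */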
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	adapter->resetting = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

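	/* netdev inits at probe time along with the structures we need below */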
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);