// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM System i/p Virtual NIC Device Driver */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

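/**
 * ibmvnic_wait_for_completion - wait for a completion signal or timeout
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached.
 * The completion may never arrive if the CRQ goes down during a reset,
 * so periodically check that the CRQ is still active and bail out with
 * an error if it is not.
 */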
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

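/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: the long term buffer to check
 * @size: the size of the long term buffer
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: true if the LTB can be reused, false otherwise.
 */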
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

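/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 * @adapter: ibmvnic adapter associated with the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify the VIOS of the
 * mapping via a REQUEST_MAP CRQ. If an LTB of the correct size already
 * exists it is reused; otherwise it is freed and reallocated.
 *
 * Return: 0 on success, a negative value otherwise.
 */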
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - specially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * side for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free so it can be reused */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* A previous pass may have saved descriptors in the indirect
	 * buffer without sending them all, so start at ind_bufp->index
	 * to avoid losing them.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing an skb saved by an earlier reset;
		 * allocate a new one only if this slot is empty.
		 */
		skb = pool->rx_buff[index].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

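/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */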
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

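/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * The existing rx pools can be reused if the pool parameters (number
 * of pools, number of buffers per pool and size of each buffer) have
 * not changed since they were last allocated.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */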
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

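/**
 * init_rx_pools() - Initialize the set of receive pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Reuse the existing rx pools if possible. Otherwise release any
 * existing pools and allocate a new set before initializing them and
 * (re)allocating their long term buffers.
 *
 * Return: 0 on success and negative value on error.
 */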
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					  rx_pool->size * rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Do not clear rx_buff->skb here - it would
			 * leak memory! replenish_rx_pool() will reuse the
			 * skbs or allocate new ones as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pool() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;

		/* Safe to re-activate here - even if the pool was
		 * already active.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

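/**
 * release_tx_pools() - Release any tx and tso pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */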
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

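/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * The existing tx pools can be reused if the pool parameters (number
 * of pools, number of buffers per pool and the MTU) have not changed
 * since they were last allocated.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */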
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

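/**
 * init_tx_pools() - Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Reuse the existing tx pools if possible. Otherwise release any
 * existing pools and allocate a new set (one tx pool and one tso pool
 * per queue) before initializing them and (re)allocating their long
 * term buffers.
 *
 * Return: 0 on success and negative value on error.
 */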
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* (Re)allocate and map the LTB for each pool and reset the pool
	 * state. The VIOS must be notified of the mapping even when the
	 * pools themselves are being reused.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;
		u32 ltb_size;

		tx_pool = &adapter->tx_pool[i];
		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
			i, tx_pool->long_term_buff.buff,
			tx_pool->num_buffers, tx_pool->buf_size);

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];
		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
			i, tso_pool->long_term_buff.buff,
			tso_pool->num_buffers, tso_pool->buf_size);

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);
			return -EIO;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		ibmvnic_disable_irqs(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by the
	 * reset routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress
	 * reset, set device state and return. Device operation will be
	 * handled by the reset routine. See also comments above regarding
	 * rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}

	if (rc) {
		release_resources(adapter);
		release_rx_pools(adapter);
		release_tx_pools(adapter);
	}

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

	return rc;
}

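/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to be filled in
 * @hdr_data: buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 *
 * Return: the total length of the headers copied into @hdr_data.
 */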
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

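/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in the descriptor array, scrq_arr.
 *
 * Return: the number of descriptors created.
 */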
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

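/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */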
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
			tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
	    !test_bit(0, &adapter->resetting)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}

static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accesses it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq_trans_cond_update(txq);
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

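/**
 * reinit_init_done() - Re-arm the init_done completion
 * @adapter: ibmvnic adapter
 *
 * We can get a transport event just after registering the CRQ, and the
 * tasklet uses init_done to communicate that event. To avoid missing a
 * notification or error, re-initialize the completion and its return
 * code _before_ registering the CRQ.
 */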
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}

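/* do_reset() returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */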
2232static int do_reset(struct ibmvnic_adapter *adapter,
2233 struct ibmvnic_rwi *rwi, u32 reset_state)
2234{
2235 struct net_device *netdev = adapter->netdev;
2236 u64 old_num_rx_queues, old_num_tx_queues;
2237 u64 old_num_rx_slots, old_num_tx_slots;
2238 int rc;
2239
2240 netdev_dbg(adapter->netdev,
2241 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2242 adapter_state_to_string(adapter->state),
2243 adapter->failover_pending,
2244 reset_reason_to_string(rwi->reset_reason),
2245 adapter_state_to_string(reset_state));
2246
2247 adapter->reset_reason = rwi->reset_reason;
2248
2249 if (adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
2250 rtnl_lock();
2251
2252 /* Now that we have the rtnl lock, clear any pending failover.
2253  * This will ensure ibmvnic_open() has either completed or will
2254  * block until failover is complete.
2255  */
2256 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2257 adapter->failover_pending = false;
2258
2259 /* read the state and check (again) after getting rtnl */
2260 reset_state = adapter->state;
2261
2262 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2263 rc = -EBUSY;
2264 goto out;
2265 }
2266
2267 netif_carrier_off(netdev);
2268
2269 old_num_rx_queues = adapter->req_rx_queues;
2270 old_num_tx_queues = adapter->req_tx_queues;
2271 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2272 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2273
2274 ibmvnic_cleanup(netdev);
2275
2276 if (reset_state == VNIC_OPEN &&
2277 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2278 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2279 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2280 rc = __ibmvnic_close(netdev);
2281 if (rc)
2282 goto out;
2283 } else {
2284 adapter->state = VNIC_CLOSING;
2285
2286 /* Release the RTNL lock before the link state change and
2287  * re-acquire it after the link state change to allow
2288  * linkwatch_event to grab the RTNL lock and run during
2289  * a reset.
2290  */
2291 rtnl_unlock();
2292 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2293 rtnl_lock();
2294 if (rc)
2295 goto out;
2296
2297 if (adapter->state == VNIC_OPEN) {
2298 /* When we dropped rtnl, ibmvnic_open() got it and
2299  * noticed that we are resetting and set the adapter
2300  * state to OPEN. Update our new "target" state and
2301  * resume the reset from the VNIC_CLOSING state
2302  * below.
2303  */
2304 netdev_dbg(netdev,
2305 "Open changed state from %s, updating.\n",
2306 adapter_state_to_string(reset_state));
2307 reset_state = VNIC_OPEN;
2308 adapter->state = VNIC_CLOSING;
2309 }
2310
2311 if (adapter->state != VNIC_CLOSING) {
2312 /* If someone else changed the adapter state
2313  * while we dropped the rtnl, fail the reset.
2314  */
2315 rc = -EAGAIN;
2316 goto out;
2317 }
2318 adapter->state = VNIC_CLOSED;
2319 }
2320 }
2321
2322 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2323 release_resources(adapter);
2324 release_sub_crqs(adapter, 1);
2325 release_crq_queue(adapter);
2326 }
2327
2328 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2329 /* remove the closed state so when we call open it appears
2330  * we are coming from the probed state
2331  */
2332 adapter->state = VNIC_PROBED;
2333
2334 reinit_init_done(adapter);
2335
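/* Re-establish the CRQ in the way the reset reason requires:
 * CHANGE_PARAM released the queue above, so register a brand new one;
 * after a MOBILITY (partition migration) event the registration
 * survives and the queue only needs to be re-enabled; for the other
 * hard reasons, reset the existing queue in place.
 */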
2336 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2337 rc = init_crq_queue(adapter);
2338 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2339 rc = ibmvnic_reenable_crq_queue(adapter);
2340 release_sub_crqs(adapter, 1);
2341 } else {
2342 rc = ibmvnic_reset_crq(adapter);
2343 if (rc == H_CLOSED || rc == H_SUCCESS) {
2344 rc = vio_enable_interrupts(adapter->vdev);
2345 if (rc)
2346 netdev_err(adapter->netdev,
2347 "Reset failed to enable interrupts. rc=%d\n",
2348 rc);
2349 }
2350 }
2351
2352 if (rc) {
2353 netdev_err(adapter->netdev,
2354 "Reset couldn't initialize crq. rc=%d\n", rc);
2355 goto out;
2356 }
2357
2358 rc = ibmvnic_reset_init(adapter, true);
2359 if (rc)
2360 goto out;
2361
2362 /* If the adapter was in PROBE or DOWN state prior to the reset,
2363  * exit here.
2364  */
2365 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2366 rc = 0;
2367 goto out;
2368 }
2369
2370 rc = ibmvnic_login(netdev);
2371 if (rc)
2372 goto out;
2373
2374 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2375 rc = init_resources(adapter);
2376 if (rc)
2377 goto out;
2378 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2379 adapter->req_tx_queues != old_num_tx_queues ||
2380 adapter->req_rx_add_entries_per_subcrq !=
2381 old_num_rx_slots ||
2382 adapter->req_tx_entries_per_subcrq !=
2383 old_num_tx_slots ||
2384 !adapter->rx_pool ||
2385 !adapter->tso_pool ||
2386 !adapter->tx_pool) {
2387 release_napi(adapter);
2388 release_vpd_data(adapter);
2389
2390 rc = init_resources(adapter);
2391 if (rc)
2392 goto out;
2393
2394 } else {
2395 rc = init_tx_pools(netdev);
2396 if (rc) {
2397 netdev_dbg(netdev,
2398 "init tx pools failed (%d)\n",
2399 rc);
2400 goto out;
2401 }
2402
2403 rc = init_rx_pools(netdev);
2404 if (rc) {
2405 netdev_dbg(netdev,
2406 "init rx pools failed (%d)\n",
2407 rc);
2408 goto out;
2409 }
2410 }
2411 ibmvnic_disable_irqs(adapter);
2412 }
2413 adapter->state = VNIC_CLOSED;
2414
2415 if (reset_state == VNIC_CLOSED) {
2416 rc = 0;
2417 goto out;
2418 }
2419
2420 rc = __ibmvnic_open(netdev);
2421 if (rc) {
2422 rc = IBMVNIC_OPEN_FAILED;
2423 goto out;
2424 }
2425
2426 /* refresh device's multicast list */
2427 ibmvnic_set_multi(netdev);
2428
2429 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2430 adapter->reset_reason == VNIC_RESET_MOBILITY)
2431 __netdev_notify_peers(netdev);
2432
2433 rc = 0;
2434
2435out:
2436 /* restore the adapter state if reset failed */
2437 if (rc)
2438 adapter->state = reset_state;
2439
2440 if (adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
2441 rtnl_unlock();
2442
2443 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2444 adapter_state_to_string(adapter->state),
2445 adapter->failover_pending, rc);
2446 return rc;
2447}
2448
2449static int do_hard_reset(struct ibmvnic_adapter *adapter,
2450 struct ibmvnic_rwi *rwi, u32 reset_state)
2451{
2452 struct net_device *netdev = adapter->netdev;
2453 int rc;
2454
2455 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2456 reset_reason_to_string(rwi->reset_reason));
2457
2458 /* read the state and check (again) after getting rtnl */
2459 reset_state = adapter->state;
2460
2461 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2462 rc = -EBUSY;
2463 goto out;
2464 }
2465
2466 netif_carrier_off(netdev);
2467 adapter->reset_reason = rwi->reset_reason;
2468
2469 ibmvnic_cleanup(netdev);
2470 release_resources(adapter);
2471 release_sub_crqs(adapter, 0);
2472 release_crq_queue(adapter);
2473
2474 /* remove the closed state so when we call open it appears
2475  * we are coming from the probed state
2476  */
2477 adapter->state = VNIC_PROBED;
2478
2479 reinit_init_done(adapter);
2480
2481 rc = init_crq_queue(adapter);
2482 if (rc) {
2483 netdev_err(adapter->netdev,
2484 "Couldn't initialize crq. rc=%d\n", rc);
2485 goto out;
2486 }
2487
2488 rc = ibmvnic_reset_init(adapter, false);
2489 if (rc)
2490 goto out;
2491
2492 /* If the adapter was in PROBE or DOWN state prior to the reset,
2493  * exit here.
2494  */
2495 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2496 goto out;
2497
2498 rc = ibmvnic_login(netdev);
2499 if (rc)
2500 goto out;
2501
2502 rc = init_resources(adapter);
2503 if (rc)
2504 goto out;
2505
2506 ibmvnic_disable_irqs(adapter);
2507 adapter->state = VNIC_CLOSED;
2508
2509 if (reset_state == VNIC_CLOSED)
2510 goto out;
2511
2512 rc = __ibmvnic_open(netdev);
2513 if (rc) {
2514 rc = IBMVNIC_OPEN_FAILED;
2515 goto out;
2516 }
2517
2518 __netdev_notify_peers(netdev);
2519out:
2520 /* restore adapter state if reset failed */
2521 if (rc)
2522 adapter->state = reset_state;
2523 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2524 adapter_state_to_string(adapter->state),
2525 adapter->failover_pending, rc);
2526 return rc;
2527}
2528
2529static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2530{
2531 struct ibmvnic_rwi *rwi;
2532 unsigned long flags;
2533
2534 spin_lock_irqsave(&adapter->rwi_lock, flags);
2535
2536 if (!list_empty(&adapter->rwi_list)) {
2537 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2538 list);
2539 list_del(&rwi->list);
2540 } else {
2541 rwi = NULL;
2542 }
2543
2544 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2545 return rwi;
2546}
2547
2548 /**
2549  * do_passive_init - complete probing when partner device is detected.
2550  * @adapter: ibmvnic_adapter struct
2551  *
2552  * If the ibmvnic device does not have a partner device to communicate with
2553  * at boot and that partner device comes online at a later time, this
2554  * function is called to complete the initialization process of the ibmvnic
2555  * device.
2556  * Caller should hold rtnl_lock().
2557  *
2558  * Returns non-zero if sub-CRQs are not initialized properly, leaving the
2559  * device in the down state. Returns 0 upon success, with the device in the
2560  * PROBED state.
2561  */
2562static int do_passive_init(struct ibmvnic_adapter *adapter)
2563{
2564 unsigned long timeout = msecs_to_jiffies(30000);
2565 struct net_device *netdev = adapter->netdev;
2566 struct device *dev = &adapter->vdev->dev;
2567 int rc;
2568
2569 netdev_dbg(netdev, "Partner device found, probing.\n");
2570
2571 adapter->state = VNIC_PROBING;
2572 reinit_completion(&adapter->init_done);
2573 adapter->init_done_rc = 0;
2574 adapter->crq.active = true;
2575
2576 rc = send_crq_init_complete(adapter);
2577 if (rc)
2578 goto out;
2579
2580 rc = send_version_xchg(adapter);
2581 if (rc)
2582 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2583
2584 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2585 dev_err(dev, "Initialization sequence timed out\n");
2586 rc = -ETIMEDOUT;
2587 goto out;
2588 }
2589
2590 rc = init_sub_crqs(adapter);
2591 if (rc) {
2592 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2593 goto out;
2594 }
2595
2596 rc = init_sub_crq_irqs(adapter);
2597 if (rc) {
2598 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
2599 goto init_failed;
2600 }
2601
2602 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2603 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2604 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2605
2606 adapter->state = VNIC_PROBED;
2607 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2608
2609 return 0;
2610
2611init_failed:
2612 release_sub_crqs(adapter, 1);
2613out:
2614 adapter->state = VNIC_DOWN;
2615 return rc;
2616}
2617
2618static void __ibmvnic_reset(struct work_struct *work)
2619{
2620 struct ibmvnic_adapter *adapter;
2621 unsigned long timeout = msecs_to_jiffies(5000);
2622 struct ibmvnic_rwi *tmprwi;
2623 bool saved_state = false;
2624 struct ibmvnic_rwi *rwi;
2625 unsigned long flags;
2626 struct device *dev;
2627 bool need_reset;
2628 int num_fails = 0;
2629 u32 reset_state;
2630 int rc = 0;
2631
2632 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2633 dev = &adapter->vdev->dev;
2634
2635 /* Wait for ibmvnic_probe() to complete. If probe is taking too long
2636  * or if another reset is in progress, defer work for now. If probe
2637  * eventually fails it will flush and terminate our work.
2638  *
2639  * Three possibilities here:
2640  * 1. Adapter being removed  - just return
2641  * 2. Timed out on probe or another reset in progress - delay the work
2642  * 3. Completed probe - perform any resets in queue
2643  */
2644 if (adapter->state == VNIC_PROBING &&
2645 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
2646 dev_err(dev, "Reset thread timed out on probe\n");
2647 queue_delayed_work(system_long_wq,
2648 &adapter->ibmvnic_delayed_reset,
2649 IBMVNIC_RESET_DELAY);
2650 return;
2651 }
2652
2653 /* If the adapter is being removed, there is nothing to do */
2654 if (adapter->state == VNIC_REMOVING)
2655 return;
2656
2657 /* ibmvnic_probe() may have purged the reset queue after we were
2658  * scheduled to process a reset so there may be no resets to process.
2659  * Before setting the ->resetting bit though, we have to make sure
2660  * that there is in fact a reset to process. Otherwise we may race
2661  * with ibmvnic_open() and end up leaving the vnic down:
2662  *
2663  *	__ibmvnic_reset()	    ibmvnic_open()
2664  *	-----------------	    --------------
2665  *
2666  *  set ->resetting bit
2667  *				find ->resetting bit is set
2668  *				set ->state to IBMVNIC_OPEN (i.e
2669  *				assume reset will open device)
2670  *				return
2671  *  find reset queue empty
2672  *  return
2673  *
2674  *	Neither performed vnic login/open and vnic stays down
2675  *
2676  * If we hit the above race, we cannot rely on the reset process to
2677  * open the vnic. So, set the ->resetting bit only after checking
2678  * that the reset queue is not empty.
2679  */
2680
2681 need_reset = false;
2682 spin_lock(&adapter->rwi_lock);
2683 if (!list_empty(&adapter->rwi_list)) {
2684 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2685 queue_delayed_work(system_long_wq,
2686 &adapter->ibmvnic_delayed_reset,
2687 IBMVNIC_RESET_DELAY);
2688 } else {
2689 need_reset = true;
2690 }
2691 }
2692 spin_unlock(&adapter->rwi_lock);
2693
2694 if (!need_reset)
2695 return;
2696
2697 rwi = get_next_rwi(adapter);
2698 while (rwi) {
2699 spin_lock_irqsave(&adapter->state_lock, flags);
2700
2701 if (adapter->state == VNIC_REMOVING ||
2702 adapter->state == VNIC_REMOVED) {
2703 spin_unlock_irqrestore(&adapter->state_lock, flags);
2704 kfree(rwi);
2705 rc = EBUSY;
2706 break;
2707 }
2708
2709 if (!saved_state) {
2710 reset_state = adapter->state;
2711 saved_state = true;
2712 }
2713 spin_unlock_irqrestore(&adapter->state_lock, flags);
2714
2715 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2716 rtnl_lock();
2717 rc = do_passive_init(adapter);
2718 rtnl_unlock();
2719 if (!rc)
2720 netif_carrier_on(adapter->netdev);
2721 } else if (adapter->force_reset_recovery) {
2722 /* Since we are doing a hard reset now, clear the
2723  * failover_pending flag so we don't ignore any
2724  * future MOBILITY or other resets.
2725  */
2726 adapter->failover_pending = false;
2727
2728 /* Transport event occurred during previous reset */
2729 if (adapter->wait_for_reset) {
2730 /* rtnl is already held by the CHANGE_PARAM requester */
2731 adapter->force_reset_recovery = false;
2732 rc = do_hard_reset(adapter, rwi, reset_state);
2733 } else {
2734 rtnl_lock();
2735 adapter->force_reset_recovery = false;
2736 rc = do_hard_reset(adapter, rwi, reset_state);
2737 rtnl_unlock();
2738 }
2739 if (rc)
2740 num_fails++;
2741 else
2742 num_fails = 0;
2743
2744 /* If auto-priority-failover is enabled we can get back-to-back
2745  * failovers during a reset, so hard resets and change-param
2746  * requests can keep failing. After three consecutive failures,
2747  * sleep for 60 seconds before retrying instead of spinning
2748  * forever.
2749  */
2750
2751 if (num_fails >= 3) {
2752 netdev_dbg(adapter->netdev,
2753 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2754 adapter_state_to_string(adapter->state),
2755 num_fails);
2756 set_current_state(TASK_UNINTERRUPTIBLE);
2757 schedule_timeout(60 * HZ);
2758 }
2759 } else {
2760 rc = do_reset(adapter, rwi, reset_state);
2761 }
2762 tmprwi = rwi;
2763 adapter->last_reset_time = jiffies;
2764
2765 if (rc)
2766 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2767
2768 rwi = get_next_rwi(adapter);
2769
2770 /* If there is another reset queued, free the previous rwi
2771  * and process the new reset even if the previous reset failed
2772  * (the previous reset could have failed because of a fail
2773  * over, for instance, so process the fail over).
2774  *
2775  * If there are no resets queued and the previous reset failed,
2776  * the adapter would be in an undefined state. So retry the
2777  * previous reset as a hard reset.
2778  */
2779
2780 if (rwi)
2781 kfree(tmprwi);
2782 else if (rc)
2783 rwi = tmprwi;
2784
2785 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2786 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
2787 adapter->force_reset_recovery = true;
2788 }
2789
2790 if (adapter->wait_for_reset) {
2791 adapter->reset_done_rc = rc;
2792 complete(&adapter->reset_done);
2793 }
2794
2795 clear_bit_unlock(0, &adapter->resetting);
2796
2797 netdev_dbg(adapter->netdev,
2798 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2799 adapter_state_to_string(adapter->state),
2800 adapter->force_reset_recovery,
2801 adapter->wait_for_reset);
2802}
2803
2804static void __ibmvnic_delayed_reset(struct work_struct *work)
2805{
2806 struct ibmvnic_adapter *adapter;
2807
2808 adapter = container_of(work, struct ibmvnic_adapter,
2809 ibmvnic_delayed_reset.work);
2810 __ibmvnic_reset(&adapter->ibmvnic_reset);
2811}
2812
2813static void flush_reset_queue(struct ibmvnic_adapter *adapter)
2814{
2815 struct list_head *entry, *tmp_entry;
2816
2817 if (!list_empty(&adapter->rwi_list)) {
2818 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
2819 list_del(entry);
2820 kfree(list_entry(entry, struct ibmvnic_rwi, list));
2821 }
2822 }
2823}
2824
2825static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2826 enum ibmvnic_reset_reason reason)
2827{
2828 struct net_device *netdev = adapter->netdev;
2829 struct ibmvnic_rwi *rwi, *tmp;
2830 unsigned long flags;
2831 int ret;
2832
2833 spin_lock_irqsave(&adapter->rwi_lock, flags);
2834
2835 /* If failover is pending don't schedule any other reset.
2836  * Instead let the failover complete. If there is already a
2837  * failover reset scheduled, we will detect and drop the
2838  * duplicate reset when walking the ->rwi_list below.
2839  */
2840 if (adapter->state == VNIC_REMOVING ||
2841 adapter->state == VNIC_REMOVED ||
2842 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2843 ret = EBUSY;
2844 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2845 goto err;
2846 }
2847
2848 list_for_each_entry(tmp, &adapter->rwi_list, list) {
2849 if (tmp->reset_reason == reason) {
2850 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2851 reset_reason_to_string(reason));
2852 ret = EBUSY;
2853 goto err;
2854 }
2855 }
2856
2857 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2858 if (!rwi) {
2859 ret = ENOMEM;
2860 goto err;
2861 }
2862
2863 /* If we just received a transport event, flush the reset queue
2864  * and process this reset instead */
2865 if (adapter->force_reset_recovery)
2866 flush_reset_queue(adapter);
2867
2868 rwi->reset_reason = reason;
2869 list_add_tail(&rwi->list, &adapter->rwi_list);
2870 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2871 reset_reason_to_string(reason));
2872 queue_work(system_long_wq, &adapter->ibmvnic_reset);
2873
2874 ret = 0;
2875err:
2876 /* ibmvnic_close() below can block, so drop the lock first */
2877 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2878
2879 if (ret == ENOMEM)
2880 ibmvnic_close(netdev);
2881
2882 return -ret;
2883}
2884
2885static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2886{
2887 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2888
2889 if (test_bit(0, &adapter->resetting)) {
2890 netdev_err(adapter->netdev,
2891 "Adapter is resetting, skip timeout reset\n");
2892 return;
2893 }
2894
2895 /* Do not queue another reset until at least 5 seconds (the default
2896  * watchdog timeout) have passed since the last reset */
2897 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2898 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2899 return;
2900 }
2901 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2902}
2903
2904static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2905 struct ibmvnic_rx_buff *rx_buff)
2906{
2907 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2908
2909 rx_buff->skb = NULL;
2910
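/* free_map is a ring of recyclable buffer indices: completions push
 * freed slots here at next_alloc, and the replenish path consumes
 * them from the other cursor (next_free).
 */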
2911 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2912 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2913
2914 atomic_dec(&pool->available);
2915}
2916
2917static int ibmvnic_poll(struct napi_struct *napi, int budget)
2918{
2919 struct ibmvnic_sub_crq_queue *rx_scrq;
2920 struct ibmvnic_adapter *adapter;
2921 struct net_device *netdev;
2922 int frames_processed;
2923 int scrq_num;
2924
2925 netdev = napi->dev;
2926 adapter = netdev_priv(netdev);
2927 scrq_num = (int)(napi - adapter->napi);
2928 frames_processed = 0;
2929 rx_scrq = adapter->rx_scrq[scrq_num];
2930
2931restart_poll:
2932 while (frames_processed < budget) {
2933 struct sk_buff *skb;
2934 struct ibmvnic_rx_buff *rx_buff;
2935 union sub_crq *next;
2936 u32 length;
2937 u16 offset;
2938 u8 flags = 0;
2939
2940 if (unlikely(test_bit(0, &adapter->resetting) &&
2941 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2942 enable_scrq_irq(adapter, rx_scrq);
2943 napi_complete_done(napi, frames_processed);
2944 return frames_processed;
2945 }
2946
2947 if (!pending_scrq(adapter, rx_scrq))
2948 break;
2949 next = ibmvnic_next_scrq(adapter, rx_scrq);
2950 rx_buff = (struct ibmvnic_rx_buff *)
2951 be64_to_cpu(next->rx_comp.correlator);
2952
2953 if (next->rx_comp.rc) {
2954 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2955 be16_to_cpu(next->rx_comp.rc));
2956 /* free the entry */
2957 next->rx_comp.first = 0;
2958 dev_kfree_skb_any(rx_buff->skb);
2959 remove_buff_from_pool(adapter, rx_buff);
2960 continue;
2961 } else if (!rx_buff->skb) {
2962 /* free the entry */
2963 next->rx_comp.first = 0;
2964 remove_buff_from_pool(adapter, rx_buff);
2965 continue;
2966 }
2967
2968 length = be32_to_cpu(next->rx_comp.len);
2969 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2970 flags = next->rx_comp.flags;
2971 skb = rx_buff->skb;
2972
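/* Ensure the frame contents are read from the DMA buffer only after
 * the descriptor fields (length/offset) above have been loaded.
 */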
2973 dma_rmb();
2974 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2975 length);
2976
2977 /* The VLAN header has been stripped by the system firmware and
2978  * needs to be re-inserted by the driver
2979  */
2980 if (adapter->rx_vlan_header_insertion &&
2981 (flags & IBMVNIC_VLAN_STRIPPED))
2982 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2983 ntohs(next->rx_comp.vlan_tci));
2984
2985 /* free the entry */
2986 next->rx_comp.first = 0;
2987 remove_buff_from_pool(adapter, rx_buff);
2988
2989 skb_put(skb, length);
2990 skb->protocol = eth_type_trans(skb, netdev);
2991 skb_record_rx_queue(skb, scrq_num);
2992
2993 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2994 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2995 skb->ip_summed = CHECKSUM_UNNECESSARY;
2996 }
2997
2998 length = skb->len;
2999 napi_gro_receive(napi, skb);
3000 netdev->stats.rx_packets++;
3001 netdev->stats.rx_bytes += length;
3002 adapter->rx_stats_buffers[scrq_num].packets++;
3003 adapter->rx_stats_buffers[scrq_num].bytes += length;
3004 frames_processed++;
3005 }
3006
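/* Replenish the receive pool once it drains below half of the
 * requested ring depth, or whenever this poll had budget to spare.
 */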
3007 if (adapter->state != VNIC_CLOSING &&
3008 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
3009 adapter->req_rx_add_entries_per_subcrq / 2) ||
3010 frames_processed < budget))
3011 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3012 if (frames_processed < budget) {
3013 if (napi_complete_done(napi, frames_processed)) {
3014 enable_scrq_irq(adapter, rx_scrq);
3015 if (pending_scrq(adapter, rx_scrq)) {
3016 if (napi_reschedule(napi)) {
3017 disable_scrq_irq(adapter, rx_scrq);
3018 goto restart_poll;
3019 }
3020 }
3021 }
3022 }
3023 return frames_processed;
3024}
3025
3026static int wait_for_reset(struct ibmvnic_adapter *adapter)
3027{
3028 int rc, ret;
3029
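/* Snapshot the currently granted settings so they can be restored
 * (by a second CHANGE_PARAM reset below) if the server rejects the
 * newly desired ones.
 */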
3030 adapter->fallback.mtu = adapter->req_mtu;
3031 adapter->fallback.rx_queues = adapter->req_rx_queues;
3032 adapter->fallback.tx_queues = adapter->req_tx_queues;
3033 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3034 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3035
3036 reinit_completion(&adapter->reset_done);
3037 adapter->wait_for_reset = true;
3038 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3039
3040 if (rc) {
3041 ret = rc;
3042 goto out;
3043 }
3044 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3045 if (rc) {
3046 ret = -ENODEV;
3047 goto out;
3048 }
3049
3050 ret = 0;
3051 if (adapter->reset_done_rc) {
3052 ret = -EIO;
3053 adapter->desired.mtu = adapter->fallback.mtu;
3054 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3055 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3056 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3057 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3058
3059 reinit_completion(&adapter->reset_done);
3060 adapter->wait_for_reset = true;
3061 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3062 if (rc) {
3063 ret = rc;
3064 goto out;
3065 }
3066 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3067 60000);
3068 if (rc) {
3069 ret = -ENODEV;
3070 goto out;
3071 }
3072 }
3073out:
3074 adapter->wait_for_reset = false;
3075
3076 return ret;
3077}
3078
3079static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3080{
3081 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3082
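/* req_mtu is tracked including the Ethernet header (netdev->mtu is
 * derived as req_mtu - ETH_HLEN), so convert back before requesting.
 */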
3083 adapter->desired.mtu = new_mtu + ETH_HLEN;
3084
3085 return wait_for_reset(adapter);
3086}
3087
3088static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3089 struct net_device *dev,
3090 netdev_features_t features)
3091{
3092 /* Some backing hardware adapters can not
3093  * handle packets with a MSS less than 224
3094  * or with only one segment.
3095  */
3096 if (skb_is_gso(skb)) {
3097 if (skb_shinfo(skb)->gso_size < 224 ||
3098 skb_shinfo(skb)->gso_segs == 1)
3099 features &= ~NETIF_F_GSO_MASK;
3100 }
3101
3102 return features;
3103}
3104
3105static const struct net_device_ops ibmvnic_netdev_ops = {
3106 .ndo_open = ibmvnic_open,
3107 .ndo_stop = ibmvnic_close,
3108 .ndo_start_xmit = ibmvnic_xmit,
3109 .ndo_set_rx_mode = ibmvnic_set_multi,
3110 .ndo_set_mac_address = ibmvnic_set_mac,
3111 .ndo_validate_addr = eth_validate_addr,
3112 .ndo_tx_timeout = ibmvnic_tx_timeout,
3113 .ndo_change_mtu = ibmvnic_change_mtu,
3114 .ndo_features_check = ibmvnic_features_check,
3115};
3116
3117
3118 /* ethtool functions */
3119static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3120 struct ethtool_link_ksettings *cmd)
3121{
3122 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3123 int rc;
3124
3125 rc = send_query_phys_parms(adapter);
3126 if (rc) {
3127 adapter->speed = SPEED_UNKNOWN;
3128 adapter->duplex = DUPLEX_UNKNOWN;
3129 }
3130 cmd->base.speed = adapter->speed;
3131 cmd->base.duplex = adapter->duplex;
3132 cmd->base.port = PORT_FIBRE;
3133 cmd->base.phy_address = 0;
3134 cmd->base.autoneg = AUTONEG_ENABLE;
3135
3136 return 0;
3137}
3138
3139static void ibmvnic_get_drvinfo(struct net_device *netdev,
3140 struct ethtool_drvinfo *info)
3141{
3142 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3143
3144 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3145 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3146 strscpy(info->fw_version, adapter->fw_version,
3147 sizeof(info->fw_version));
3148}
3149
3150static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3151{
3152 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3153
3154 return adapter->msg_enable;
3155}
3156
3157static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3158{
3159 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3160
3161 adapter->msg_enable = data;
3162}
3163
3164static u32 ibmvnic_get_link(struct net_device *netdev)
3165{
3166 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3167
3168 /* Don't need to send a query because we request a logical link up at
3169  * init and then we wait for link state indications
3170  */
3171 return adapter->logical_link_state;
3172}
3173
3174static void ibmvnic_get_ringparam(struct net_device *netdev,
3175 struct ethtool_ringparam *ring,
3176 struct kernel_ethtool_ringparam *kernel_ring,
3177 struct netlink_ext_ack *extack)
3178{
3179 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3180
3181 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3182 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3183 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3184 } else {
3185 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3186 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3187 }
3188 ring->rx_mini_max_pending = 0;
3189 ring->rx_jumbo_max_pending = 0;
3190 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3191 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3192 ring->rx_mini_pending = 0;
3193 ring->rx_jumbo_pending = 0;
3194}
3195
3196static int ibmvnic_set_ringparam(struct net_device *netdev,
3197 struct ethtool_ringparam *ring,
3198 struct kernel_ethtool_ringparam *kernel_ring,
3199 struct netlink_ext_ack *extack)
3200{
3201 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3202 int ret;
3203
3204 ret = 0;
3205 adapter->desired.rx_entries = ring->rx_pending;
3206 adapter->desired.tx_entries = ring->tx_pending;
3207
3208 ret = wait_for_reset(adapter);
3209
3210 if (!ret &&
3211 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
3212 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
3213 netdev_info(netdev,
3214 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3215 ring->rx_pending, ring->tx_pending,
3216 adapter->req_rx_add_entries_per_subcrq,
3217 adapter->req_tx_entries_per_subcrq);
3218 return ret;
3219}
3220
3221static void ibmvnic_get_channels(struct net_device *netdev,
3222 struct ethtool_channels *channels)
3223{
3224 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3225
3226 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3227 channels->max_rx = adapter->max_rx_queues;
3228 channels->max_tx = adapter->max_tx_queues;
3229 } else {
3230 channels->max_rx = IBMVNIC_MAX_QUEUES;
3231 channels->max_tx = IBMVNIC_MAX_QUEUES;
3232 }
3233
3234 channels->max_other = 0;
3235 channels->max_combined = 0;
3236 channels->rx_count = adapter->req_rx_queues;
3237 channels->tx_count = adapter->req_tx_queues;
3238 channels->other_count = 0;
3239 channels->combined_count = 0;
3240}
3241
3242static int ibmvnic_set_channels(struct net_device *netdev,
3243 struct ethtool_channels *channels)
3244{
3245 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3246 int ret;
3247
3248 ret = 0;
3249 adapter->desired.rx_queues = channels->rx_count;
3250 adapter->desired.tx_queues = channels->tx_count;
3251
3252 ret = wait_for_reset(adapter);
3253
3254 if (!ret &&
3255 (adapter->req_rx_queues != channels->rx_count ||
3256 adapter->req_tx_queues != channels->tx_count))
3257 netdev_info(netdev,
3258 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3259 channels->rx_count, channels->tx_count,
3260 adapter->req_rx_queues, adapter->req_tx_queues);
3261 return ret;
3262}
3263
3264static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3265{
3266 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3267 int i;
3268
3269 switch (stringset) {
3270 case ETH_SS_STATS:
3271 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3272 i++, data += ETH_GSTRING_LEN)
3273 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3274
3275 for (i = 0; i < adapter->req_tx_queues; i++) {
3276 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3277 data += ETH_GSTRING_LEN;
3278
3279 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3280 data += ETH_GSTRING_LEN;
3281
3282 snprintf(data, ETH_GSTRING_LEN,
3283 "tx%d_dropped_packets", i);
3284 data += ETH_GSTRING_LEN;
3285 }
3286
3287 for (i = 0; i < adapter->req_rx_queues; i++) {
3288 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3289 data += ETH_GSTRING_LEN;
3290
3291 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3292 data += ETH_GSTRING_LEN;
3293
3294 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3295 data += ETH_GSTRING_LEN;
3296 }
3297 break;
3298
3299 case ETH_SS_PRIV_FLAGS:
3300 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3301 strcpy(data + i * ETH_GSTRING_LEN,
3302 ibmvnic_priv_flags[i]);
3303 break;
3304 default:
3305 return;
3306 }
3307}
3308
3309static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3310{
3311 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3312
3313 switch (sset) {
3314 case ETH_SS_STATS:
3315 return ARRAY_SIZE(ibmvnic_stats) +
3316 adapter->req_tx_queues * NUM_TX_STATS +
3317 adapter->req_rx_queues * NUM_RX_STATS;
3318 case ETH_SS_PRIV_FLAGS:
3319 return ARRAY_SIZE(ibmvnic_priv_flags);
3320 default:
3321 return -EOPNOTSUPP;
3322 }
3323}
3324
3325static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3326 struct ethtool_stats *stats, u64 *data)
3327{
3328 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3329 union ibmvnic_crq crq;
3330 int i, j;
3331 int rc;
3332
3333 memset(&crq, 0, sizeof(crq));
3334 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3335 crq.request_statistics.cmd = REQUEST_STATISTICS;
3336 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3337 crq.request_statistics.len =
3338 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3339
3340 /* Wait for the requested statistics to be returned by the server */
3341 reinit_completion(&adapter->stats_done);
3342 rc = ibmvnic_send_crq(adapter, &crq);
3343 if (rc)
3344 return;
3345 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3346 if (rc)
3347 return;
3348
3349 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3350 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3351 (adapter, ibmvnic_stats[i].offset));
3352
3353 for (j = 0; j < adapter->req_tx_queues; j++) {
3354 data[i] = adapter->tx_stats_buffers[j].packets;
3355 i++;
3356 data[i] = adapter->tx_stats_buffers[j].bytes;
3357 i++;
3358 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3359 i++;
3360 }
3361
3362 for (j = 0; j < adapter->req_rx_queues; j++) {
3363 data[i] = adapter->rx_stats_buffers[j].packets;
3364 i++;
3365 data[i] = adapter->rx_stats_buffers[j].bytes;
3366 i++;
3367 data[i] = adapter->rx_stats_buffers[j].interrupts;
3368 i++;
3369 }
3370}
3371
3372static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3373{
3374 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3375
3376 return adapter->priv_flags;
3377}
3378
3379static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3380{
3381 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3382 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3383
3384 if (which_maxes)
3385 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3386 else
3387 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3388
3389 return 0;
3390}
3391
3392static const struct ethtool_ops ibmvnic_ethtool_ops = {
3393 .get_drvinfo = ibmvnic_get_drvinfo,
3394 .get_msglevel = ibmvnic_get_msglevel,
3395 .set_msglevel = ibmvnic_set_msglevel,
3396 .get_link = ibmvnic_get_link,
3397 .get_ringparam = ibmvnic_get_ringparam,
3398 .set_ringparam = ibmvnic_set_ringparam,
3399 .get_channels = ibmvnic_get_channels,
3400 .set_channels = ibmvnic_set_channels,
3401 .get_strings = ibmvnic_get_strings,
3402 .get_sset_count = ibmvnic_get_sset_count,
3403 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3404 .get_link_ksettings = ibmvnic_get_link_ksettings,
3405 .get_priv_flags = ibmvnic_get_priv_flags,
3406 .set_priv_flags = ibmvnic_set_priv_flags,
3407};
3408
3409
3410 /* Routines for managing CRQs/sCRQs */
3411static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3412 struct ibmvnic_sub_crq_queue *scrq)
3413{
3414 int rc;
3415
3416 if (!scrq) {
3417 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3418 return -EINVAL;
3419 }
3420
3421 if (scrq->irq) {
3422 free_irq(scrq->irq, scrq);
3423 irq_dispose_mapping(scrq->irq);
3424 scrq->irq = 0;
3425 }
3426
3427 if (scrq->msgs) {
3428 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3429 atomic_set(&scrq->used, 0);
3430 scrq->cur = 0;
3431 scrq->ind_buf.index = 0;
3432 } else {
3433 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3434 return -EINVAL;
3435 }
3436
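/* Re-register the queue with the hypervisor; this may hand back a new
 * hw_irq, which is why the old IRQ mapping was disposed of above.
 * init_sub_crq_irqs() creates a fresh mapping later.
 */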
3437 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3438 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3439 return rc;
3440}
3441
3442static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3443{
3444 int i, rc;
3445
3446 if (!adapter->tx_scrq || !adapter->rx_scrq)
3447 return -EINVAL;
3448
3449 for (i = 0; i < adapter->req_tx_queues; i++) {
3450 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3451 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3452 if (rc)
3453 return rc;
3454 }
3455
3456 for (i = 0; i < adapter->req_rx_queues; i++) {
3457 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3458 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3459 if (rc)
3460 return rc;
3461 }
3462
3463 return rc;
3464}
3465
3466static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3467 struct ibmvnic_sub_crq_queue *scrq,
3468 bool do_h_free)
3469{
3470 struct device *dev = &adapter->vdev->dev;
3471 long rc;
3472
3473 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3474
3475 if (do_h_free) {
3476 /* Close the sub-crq */
3477 do {
3478 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3479 adapter->vdev->unit_address,
3480 scrq->crq_num);
3481 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3482
3483 if (rc) {
3484 netdev_err(adapter->netdev,
3485 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3486 scrq->crq_num, rc);
3487 }
3488 }
3489
3490 dma_free_coherent(dev,
3491 IBMVNIC_IND_ARR_SZ,
3492 scrq->ind_buf.indir_arr,
3493 scrq->ind_buf.indir_dma);
3494
3495 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3496 DMA_BIDIRECTIONAL);
3497 free_pages((unsigned long)scrq->msgs, 2);
3498 kfree(scrq);
3499}
3500
3501static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3502 *adapter)
3503{
3504 struct device *dev = &adapter->vdev->dev;
3505 struct ibmvnic_sub_crq_queue *scrq;
3506 int rc;
3507
3508 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3509 if (!scrq)
3510 return NULL;
3511
3512 scrq->msgs =
3513 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3514 if (!scrq->msgs) {
3515 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3516 goto zero_page_failed;
3517 }
3518
3519 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3520 DMA_BIDIRECTIONAL);
3521 if (dma_mapping_error(dev, scrq->msg_token)) {
3522 dev_warn(dev, "Couldn't map crq queue messages page\n");
3523 goto map_failed;
3524 }
3525
3526 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3527 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3528
3529 if (rc == H_RESOURCE)
3530 rc = ibmvnic_reset_crq(adapter);
3531
3532 if (rc == H_CLOSED) {
3533 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3534 } else if (rc) {
3535 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3536 goto reg_failed;
3537 }
3538
3539 scrq->adapter = adapter;
3540 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3541 scrq->ind_buf.index = 0;
3542
3543 scrq->ind_buf.indir_arr =
3544 dma_alloc_coherent(dev,
3545 IBMVNIC_IND_ARR_SZ,
3546 &scrq->ind_buf.indir_dma,
3547 GFP_KERNEL);
3548
3549 if (!scrq->ind_buf.indir_arr)
3550 goto indir_failed;
3551
3552 spin_lock_init(&scrq->lock);
3553
3554 netdev_dbg(adapter->netdev,
3555 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3556 scrq->crq_num, scrq->hw_irq, scrq->irq);
3557
3558 return scrq;
3559
3560indir_failed:
3561 do {
3562 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3563 adapter->vdev->unit_address,
3564 scrq->crq_num);
3565 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3566reg_failed:
3567 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3568 DMA_BIDIRECTIONAL);
3569map_failed:
3570 free_pages((unsigned long)scrq->msgs, 2);
3571zero_page_failed:
3572 kfree(scrq);
3573
3574 return NULL;
3575}
3576
3577static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3578{
3579 int i;
3580
3581 if (adapter->tx_scrq) {
3582 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3583 if (!adapter->tx_scrq[i])
3584 continue;
3585
3586 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3587 i);
3588 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3589 if (adapter->tx_scrq[i]->irq) {
3590 free_irq(adapter->tx_scrq[i]->irq,
3591 adapter->tx_scrq[i]);
3592 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3593 adapter->tx_scrq[i]->irq = 0;
3594 }
3595
3596 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3597 do_h_free);
3598 }
3599
3600 kfree(adapter->tx_scrq);
3601 adapter->tx_scrq = NULL;
3602 adapter->num_active_tx_scrqs = 0;
3603 }
3604
3605 if (adapter->rx_scrq) {
3606 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3607 if (!adapter->rx_scrq[i])
3608 continue;
3609
3610 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3611 i);
3612 if (adapter->rx_scrq[i]->irq) {
3613 free_irq(adapter->rx_scrq[i]->irq,
3614 adapter->rx_scrq[i]);
3615 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3616 adapter->rx_scrq[i]->irq = 0;
3617 }
3618
3619 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3620 do_h_free);
3621 }
3622
3623 kfree(adapter->rx_scrq);
3624 adapter->rx_scrq = NULL;
3625 adapter->num_active_rx_scrqs = 0;
3626 }
3627}
3628
3629static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3630 struct ibmvnic_sub_crq_queue *scrq)
3631{
3632 struct device *dev = &adapter->vdev->dev;
3633 unsigned long rc;
3634
3635 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3636 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3637 if (rc)
3638 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3639 scrq->hw_irq, rc);
3640 return rc;
3641}
3642
3643static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3644 struct ibmvnic_sub_crq_queue *scrq)
3645{
3646 struct device *dev = &adapter->vdev->dev;
3647 unsigned long rc;
3648
3649 if (scrq->hw_irq > 0x100000000ULL) {
3650 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3651 return 1;
3652 }
3653
3654 if (test_bit(0, &adapter->resetting) &&
3655 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3656 u64 val = (0xff000000) | scrq->hw_irq;
3657
3658 rc = plpar_hcall_norets(H_EOI, val);
3659
3660 /* H_EOI would fail with rc = H_FUNCTION when running
3661  * in XIVE mode which is expected */
3662 if (rc && (rc != H_FUNCTION))
3663 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3664 val, rc);
3665 }
3666
3667 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3668 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3669 if (rc)
3670 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3671 scrq->hw_irq, rc);
3672 return rc;
3673}
3674
3675static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3676 struct ibmvnic_sub_crq_queue *scrq)
3677{
3678 struct device *dev = &adapter->vdev->dev;
3679 struct ibmvnic_tx_pool *tx_pool;
3680 struct ibmvnic_tx_buff *txbuff;
3681 struct netdev_queue *txq;
3682 union sub_crq *next;
3683 int index;
3684 int i;
3685
3686restart_loop:
3687 while (pending_scrq(adapter, scrq)) {
3688 unsigned int pool = scrq->pool_index;
3689 int num_entries = 0;
3690 int total_bytes = 0;
3691 int num_packets = 0;
3692
3693 next = ibmvnic_next_scrq(adapter, scrq);
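/* Each completion carries a batch of correlator indices; the
 * IBMVNIC_TSO_POOL_MASK bit in an index selects between the TSO
 * buffer pool and the regular TX pool.
 */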
3694 for (i = 0; i < next->tx_comp.num_comps; i++) {
3695 index = be32_to_cpu(next->tx_comp.correlators[i]);
3696 if (index & IBMVNIC_TSO_POOL_MASK) {
3697 tx_pool = &adapter->tso_pool[pool];
3698 index &= ~IBMVNIC_TSO_POOL_MASK;
3699 } else {
3700 tx_pool = &adapter->tx_pool[pool];
3701 }
3702
3703 txbuff = &tx_pool->tx_buff[index];
3704 num_packets++;
3705 num_entries += txbuff->num_entries;
3706 if (txbuff->skb) {
3707 total_bytes += txbuff->skb->len;
3708 if (next->tx_comp.rcs[i]) {
3709 dev_err(dev, "tx error %x\n",
3710 next->tx_comp.rcs[i]);
3711 dev_kfree_skb_irq(txbuff->skb);
3712 } else {
3713 dev_consume_skb_irq(txbuff->skb);
3714 }
3715 txbuff->skb = NULL;
3716 } else {
3717 netdev_warn(adapter->netdev,
3718 "TX completion received with NULL socket buffer\n");
3719 }
3720 tx_pool->free_map[tx_pool->producer_index] = index;
3721 tx_pool->producer_index =
3722 (tx_pool->producer_index + 1) %
3723 tx_pool->num_buffers;
3724 }
3725 /* remove tx_comp scrq */
3726 next->tx_comp.first = 0;
3727
3728 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3729 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3730
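/* If the xmit path stopped this subqueue when the ring filled,
 * restart it once at least half of the entries are free again.
 */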
3731 if (atomic_sub_return(num_entries, &scrq->used) <=
3732 (adapter->req_tx_entries_per_subcrq / 2) &&
3733 __netif_subqueue_stopped(adapter->netdev,
3734 scrq->pool_index)) {
3735 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3736 netdev_dbg(adapter->netdev, "Started queue %d\n",
3737 scrq->pool_index);
3738 }
3739 }
3740
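/* Re-enable the interrupt, then re-check the queue: completions that
 * arrived in the window before the enable would otherwise be missed.
 */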
3741 enable_scrq_irq(adapter, scrq);
3742
3743 if (pending_scrq(adapter, scrq)) {
3744 disable_scrq_irq(adapter, scrq);
3745 goto restart_loop;
3746 }
3747
3748 return 0;
3749}
3750
3751static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3752{
3753 struct ibmvnic_sub_crq_queue *scrq = instance;
3754 struct ibmvnic_adapter *adapter = scrq->adapter;
3755
3756 disable_scrq_irq(adapter, scrq);
3757 ibmvnic_complete_tx(adapter, scrq);
3758
3759 return IRQ_HANDLED;
3760}
3761
3762static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3763{
3764 struct ibmvnic_sub_crq_queue *scrq = instance;
3765 struct ibmvnic_adapter *adapter = scrq->adapter;
3766
3767 /* When booting a kdump kernel we can hit pending interrupts
3768  * prior to completing driver initialization.
3769  */
3770 if (unlikely(adapter->state != VNIC_OPEN))
3771 return IRQ_NONE;
3772
3773 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3774
3775 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3776 disable_scrq_irq(adapter, scrq);
3777 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3778 }
3779
3780 return IRQ_HANDLED;
3781}
3782
3783static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3784{
3785 struct device *dev = &adapter->vdev->dev;
3786 struct ibmvnic_sub_crq_queue *scrq;
3787 int i = 0, j = 0;
3788 int rc = 0;
3789
3790 for (i = 0; i < adapter->req_tx_queues; i++) {
3791 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3792 i);
3793 scrq = adapter->tx_scrq[i];
3794 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3795
3796 if (!scrq->irq) {
3797 rc = -EINVAL;
3798 dev_err(dev, "Error mapping irq\n");
3799 goto req_tx_irq_failed;
3800 }
3801
3802 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3803 adapter->vdev->unit_address, i);
3804 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3805 0, scrq->name, scrq);
3806
3807 if (rc) {
3808 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3809 scrq->irq, rc);
3810 irq_dispose_mapping(scrq->irq);
3811 goto req_tx_irq_failed;
3812 }
3813 }
3814
3815 for (i = 0; i < adapter->req_rx_queues; i++) {
3816 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3817 i);
3818 scrq = adapter->rx_scrq[i];
3819 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3820 if (!scrq->irq) {
3821 rc = -EINVAL;
3822 dev_err(dev, "Error mapping irq\n");
3823 goto req_rx_irq_failed;
3824 }
3825 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3826 adapter->vdev->unit_address, i);
3827 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3828 0, scrq->name, scrq);
3829 if (rc) {
3830 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3831 scrq->irq, rc);
3832 irq_dispose_mapping(scrq->irq);
3833 goto req_rx_irq_failed;
3834 }
3835 }
3836 return rc;
3837
3838req_rx_irq_failed:
3839 for (j = 0; j < i; j++) {
3840 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3841 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3842 }
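/* All TX irqs were requested successfully, so fall through and
 * release every one of them as well.
 */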
3843 i = adapter->req_tx_queues;
3844req_tx_irq_failed:
3845 for (j = 0; j < i; j++) {
3846 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3847 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3848 }
3849 release_sub_crqs(adapter, 1);
3850 return rc;
3851}
3852
3853static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3854{
3855 struct device *dev = &adapter->vdev->dev;
3856 struct ibmvnic_sub_crq_queue **allqueues;
3857 int registered_queues = 0;
3858 int total_queues;
3859 int more = 0;
3860 int i;
3861
3862 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3863
3864 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3865 if (!allqueues)
3866 return -ENOMEM;
3867
3868 for (i = 0; i < total_queues; i++) {
3869 allqueues[i] = init_sub_crq_queue(adapter);
3870 if (!allqueues[i]) {
3871 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3872 break;
3873 }
3874 registered_queues++;
3875 }
3876
3877 /* Make sure we were able to register the minimum number of queues */
3878 if (registered_queues <
3879 adapter->min_tx_queues + adapter->min_rx_queues) {
3880 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3881 goto tx_failed;
3882 }
3883
3884 /* Distribute the shortfall from failed queue allocations */
3885 for (i = 0; i < total_queues - registered_queues + more; i++) {
3886 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3887 switch (i % 3) {
3888 case 0:
3889 if (adapter->req_rx_queues > adapter->min_rx_queues)
3890 adapter->req_rx_queues--;
3891 else
3892 more++;
3893 break;
3894 case 1:
3895 if (adapter->req_tx_queues > adapter->min_tx_queues)
3896 adapter->req_tx_queues--;
3897 else
3898 more++;
3899 break;
3900 }
3901 }
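/* The switch above alternates which side gives up a queue
 * (i % 3 == 0 trims RX, == 1 trims TX, == 2 trims neither), and
 * "more" extends the loop whenever a side is already at its minimum
 * and cannot shrink further.
 */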
3902
3903 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3904 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3905 if (!adapter->tx_scrq)
3906 goto tx_failed;
3907
3908 for (i = 0; i < adapter->req_tx_queues; i++) {
3909 adapter->tx_scrq[i] = allqueues[i];
3910 adapter->tx_scrq[i]->pool_index = i;
3911 adapter->num_active_tx_scrqs++;
3912 }
3913
3914 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3915 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3916 if (!adapter->rx_scrq)
3917 goto rx_failed;
3918
3919 for (i = 0; i < adapter->req_rx_queues; i++) {
3920 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3921 adapter->rx_scrq[i]->scrq_num = i;
3922 adapter->num_active_rx_scrqs++;
3923 }
3924
3925 kfree(allqueues);
3926 return 0;
3927
3928rx_failed:
3929 kfree(adapter->tx_scrq);
3930 adapter->tx_scrq = NULL;
3931tx_failed:
3932 for (i = 0; i < registered_queues; i++)
3933 release_sub_crq_queue(adapter, allqueues[i], 1);
3934 kfree(allqueues);
3935 return -ENOMEM;
3936}
3937
3938static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3939{
3940 struct device *dev = &adapter->vdev->dev;
3941 union ibmvnic_crq crq;
3942 int max_entries;
3943 int cap_reqs;
3944
3945 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
3946  * the PROMISC flag). Initialize this count upfront. When the tasklet
3947  * receives a response to all of these, it will send the next protocol
3948  * message (QUERY_IP_OFFLOAD).
3949  */
3950 if (!(adapter->netdev->flags & IFF_PROMISC) ||
3951 adapter->promisc_supported)
3952 cap_reqs = 7;
3953 else
3954 cap_reqs = 6;
3955
3956 if (!retry) {
3957 /* Sub-CRQ entries are 32 bytes long */
3958 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3959
3960 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3961
3962 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3963 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3964 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3965 return;
3966 }
3967
3968 if (adapter->desired.mtu)
3969 adapter->req_mtu = adapter->desired.mtu;
3970 else
3971 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3972
3973 if (!adapter->desired.tx_entries)
3974 adapter->desired.tx_entries =
3975 adapter->max_tx_entries_per_subcrq;
3976 if (!adapter->desired.rx_entries)
3977 adapter->desired.rx_entries =
3978 adapter->max_rx_add_entries_per_subcrq;
3979
3980 max_entries = IBMVNIC_MAX_LTB_SIZE /
3981 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3982
3983 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3984 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3985 adapter->desired.tx_entries = max_entries;
3986 }
3987
3988 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3989 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3990 adapter->desired.rx_entries = max_entries;
3991 }
3992
3993 if (adapter->desired.tx_entries)
3994 adapter->req_tx_entries_per_subcrq =
3995 adapter->desired.tx_entries;
3996 else
3997 adapter->req_tx_entries_per_subcrq =
3998 adapter->max_tx_entries_per_subcrq;
3999
4000 if (adapter->desired.rx_entries)
4001 adapter->req_rx_add_entries_per_subcrq =
4002 adapter->desired.rx_entries;
4003 else
4004 adapter->req_rx_add_entries_per_subcrq =
4005 adapter->max_rx_add_entries_per_subcrq;
4006
4007 if (adapter->desired.tx_queues)
4008 adapter->req_tx_queues =
4009 adapter->desired.tx_queues;
4010 else
4011 adapter->req_tx_queues =
4012 adapter->opt_tx_comp_sub_queues;
4013
4014 if (adapter->desired.rx_queues)
4015 adapter->req_rx_queues =
4016 adapter->desired.rx_queues;
4017 else
4018 adapter->req_rx_queues =
4019 adapter->opt_rx_comp_queues;
4020
4021 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4022 } else {
4023 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4024 }
4025 memset(&crq, 0, sizeof(crq));
4026 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4027 crq.request_capability.cmd = REQUEST_CAPABILITY;
4028
4029 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4030 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4031 cap_reqs--;
4032 ibmvnic_send_crq(adapter, &crq);
4033
4034 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4035 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4036 cap_reqs--;
4037 ibmvnic_send_crq(adapter, &crq);
4038
4039 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4040 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4041 cap_reqs--;
4042 ibmvnic_send_crq(adapter, &crq);
4043
4044 crq.request_capability.capability =
4045 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4046 crq.request_capability.number =
4047 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4048 cap_reqs--;
4049 ibmvnic_send_crq(adapter, &crq);
4050
4051 crq.request_capability.capability =
4052 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4053 crq.request_capability.number =
4054 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4055 cap_reqs--;
4056 ibmvnic_send_crq(adapter, &crq);
4057
4058 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4059 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4060 cap_reqs--;
4061 ibmvnic_send_crq(adapter, &crq);
4062
4063 if (adapter->netdev->flags & IFF_PROMISC) {
4064 if (adapter->promisc_supported) {
4065 crq.request_capability.capability =
4066 cpu_to_be16(PROMISC_REQUESTED);
4067 crq.request_capability.number = cpu_to_be64(1);
4068 cap_reqs--;
4069 ibmvnic_send_crq(adapter, &crq);
4070 }
4071 } else {
4072 crq.request_capability.capability =
4073 cpu_to_be16(PROMISC_REQUESTED);
4074 crq.request_capability.number = cpu_to_be64(0);
4075 cap_reqs--;
4076 ibmvnic_send_crq(adapter, &crq);
4077 }
4078
4079 /* Keep at end to catch any discrepancy between expected and actual
4080  * CRQs sent.
4081  */
4082 WARN_ON(cap_reqs != 0);
4083}
4084
4085static int pending_scrq(struct ibmvnic_adapter *adapter,
4086 struct ibmvnic_sub_crq_queue *scrq)
4087{
4088 union sub_crq *entry = &scrq->msgs[scrq->cur];
4089 int rc;
4090
4091 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4092
4093 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4094  * rest of the descriptor's contents.
4095  */
4096 dma_rmb();
4097
4098 return rc;
4099}
4100
4101static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4102 struct ibmvnic_sub_crq_queue *scrq)
4103{
4104 union sub_crq *entry;
4105 unsigned long flags;
4106
4107 spin_lock_irqsave(&scrq->lock, flags);
4108 entry = &scrq->msgs[scrq->cur];
4109 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4110 if (++scrq->cur == scrq->size)
4111 scrq->cur = 0;
4112 } else {
4113 entry = NULL;
4114 }
4115 spin_unlock_irqrestore(&scrq->lock, flags);
4116
4117 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4118  * rest of the descriptor's contents.
4119  */
4120 dma_rmb();
4121
4122 return entry;
4123}
4124
4125static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4126{
4127 struct ibmvnic_crq_queue *queue = &adapter->crq;
4128 union ibmvnic_crq *crq;
4129
4130 crq = &queue->msgs[queue->cur];
4131 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4132 if (++queue->cur == queue->size)
4133 queue->cur = 0;
4134 } else {
4135 crq = NULL;
4136 }
4137
4138 return crq;
4139}
4140
4141static void print_subcrq_error(struct device *dev, int rc, const char *func)
4142{
4143 switch (rc) {
4144 case H_PARAMETER:
4145 dev_warn_ratelimited(dev,
4146 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4147 func, rc);
4148 break;
4149 case H_CLOSED:
4150 dev_warn_ratelimited(dev,
4151 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4152 func, rc);
4153 break;
4154 default:
4155 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4156 break;
4157 }
4158}
4159
4160static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4161 u64 remote_handle, u64 ioba, u64 num_entries)
4162{
4163 unsigned int ua = adapter->vdev->unit_address;
4164 struct device *dev = &adapter->vdev->dev;
4165 int rc;
4166
4167 /* Make sure the hypervisor sees the complete request */
4168 dma_wmb();
4169 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4170 cpu_to_be64(remote_handle),
4171 ioba, num_entries);
4172
4173 if (rc)
4174 print_subcrq_error(dev, rc, __func__);
4175
4176 return rc;
4177}
4178
4179static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4180 union ibmvnic_crq *crq)
4181{
4182 unsigned int ua = adapter->vdev->unit_address;
4183 struct device *dev = &adapter->vdev->dev;
4184 u64 *u64_crq = (u64 *)crq;
4185 int rc;
4186
4187 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4188 (unsigned long)cpu_to_be64(u64_crq[0]),
4189 (unsigned long)cpu_to_be64(u64_crq[1]));
4190
4191 if (!adapter->crq.active &&
4192 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4193 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4194 return -EINVAL;
4195 }
4196
4197 /* Make sure the hypervisor sees the complete request */
4198 dma_wmb();
4199
4200 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4201 cpu_to_be64(u64_crq[0]),
4202 cpu_to_be64(u64_crq[1]));
4203
4204 if (rc) {
4205 if (rc == H_CLOSED) {
4206 dev_warn(dev, "CRQ Queue closed\n");
4207 /* do not reset, report the fail, wait for passive init from server */
4208 }
4209
4210 dev_warn(dev, "Send error (rc=%d)\n", rc);
4211 }
4212
4213 return rc;
4214}
4215
4216static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4217{
4218 struct device *dev = &adapter->vdev->dev;
4219 union ibmvnic_crq crq;
4220 int retries = 100;
4221 int rc;
4222
4223 memset(&crq, 0, sizeof(crq));
4224 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4225 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4226 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4227
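/* H_CLOSED means the partner has not opened its end of the CRQ yet;
 * retry for up to 100 * 50ms = 5 seconds before giving up.
 */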
4228 do {
4229 rc = ibmvnic_send_crq(adapter, &crq);
4230 if (rc != H_CLOSED)
4231 break;
4232 retries--;
4233 msleep(50);
4234
4235 } while (retries > 0);
4236
4237 if (rc) {
4238 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4239 return rc;
4240 }
4241
4242 return 0;
4243}
4244
4245struct vnic_login_client_data {
4246 u8 type;
4247 __be16 len;
4248 char name[];
4249} __packed;
4250
4251static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4252{
4253 int len;
4254
4255 /* Calculate the amount of buffer space needed by the
4256  * vnic client data in the login buffer. There are four entries,
4257  * OS name, LPAR name, device name, and a null last entry.
4258  */
4259 len = 4 * sizeof(struct vnic_login_client_data);
4260 len += 6; /* "Linux" plus NULL */
4261 len += strlen(utsname()->nodename) + 1;
4262 len += strlen(adapter->netdev->name) + 1;
4263
4264 return len;
4265}
4266
4267static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4268 struct vnic_login_client_data *vlcd)
4269{
4270 const char *os_name = "Linux";
4271 int len;
4272
4273 /* Type 1 - LPAR OS */
4274 vlcd->type = 1;
4275 len = strlen(os_name) + 1;
4276 vlcd->len = cpu_to_be16(len);
4277 strscpy(vlcd->name, os_name, len);
4278 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4279
4280 /* Type 2 - LPAR name */
4281 vlcd->type = 2;
4282 len = strlen(utsname()->nodename) + 1;
4283 vlcd->len = cpu_to_be16(len);
4284 strscpy(vlcd->name, utsname()->nodename, len);
4285 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4286
4287 /* Type 3 - device name */
4288 vlcd->type = 3;
4289 len = strlen(adapter->netdev->name) + 1;
4290 vlcd->len = cpu_to_be16(len);
4291 strscpy(vlcd->name, adapter->netdev->name, len);
4292}
4293
4294static int send_login(struct ibmvnic_adapter *adapter)
4295{
4296 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4297 struct ibmvnic_login_buffer *login_buffer;
4298 struct device *dev = &adapter->vdev->dev;
4299 struct vnic_login_client_data *vlcd;
4300 dma_addr_t rsp_buffer_token;
4301 dma_addr_t buffer_token;
4302 size_t rsp_buffer_size;
4303 union ibmvnic_crq crq;
4304 int client_data_len;
4305 size_t buffer_size;
4306 __be64 *tx_list_p;
4307 __be64 *rx_list_p;
4308 int rc;
4309 int i;
4310
4311 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4312 netdev_err(adapter->netdev,
4313 "RX or TX queues are not allocated, device login failed\n");
4314 return -ENOMEM;
4315 }
4316
4317 release_login_buffer(adapter);
4318 release_login_rsp_buffer(adapter);
4319
4320 client_data_len = vnic_client_data_len(adapter);
4321
4322 buffer_size =
4323 sizeof(struct ibmvnic_login_buffer) +
4324 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4325 client_data_len;
4326
4327 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4328 if (!login_buffer)
4329 goto buf_alloc_failed;
4330
4331 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4332 DMA_TO_DEVICE);
4333 if (dma_mapping_error(dev, buffer_token)) {
4334 dev_err(dev, "Couldn't map login buffer\n");
4335 goto buf_map_failed;
4336 }
4337
4338 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4339 sizeof(u64) * adapter->req_tx_queues +
4340 sizeof(u64) * adapter->req_rx_queues +
4341 sizeof(u64) * adapter->req_rx_queues +
4342 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
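/* The response returns, in order: one queue handle per TX sub-CRQ,
 * one per RX sub-CRQ, one buffer size per RX sub-CRQ, and the list of
 * supported TX descriptor versions - hence the four sizing terms.
 */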
4343
4344 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4345 if (!login_rsp_buffer)
4346 goto buf_rsp_alloc_failed;
4347
4348 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4349 rsp_buffer_size, DMA_FROM_DEVICE);
4350 if (dma_mapping_error(dev, rsp_buffer_token)) {
4351 dev_err(dev, "Couldn't map login rsp buffer\n");
4352 goto buf_rsp_map_failed;
4353 }
4354
4355 adapter->login_buf = login_buffer;
4356 adapter->login_buf_token = buffer_token;
4357 adapter->login_buf_sz = buffer_size;
4358 adapter->login_rsp_buf = login_rsp_buffer;
4359 adapter->login_rsp_buf_token = rsp_buffer_token;
4360 adapter->login_rsp_buf_sz = rsp_buffer_size;
4361
4362 login_buffer->len = cpu_to_be32(buffer_size);
4363 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4364 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4365 login_buffer->off_txcomp_subcrqs =
4366 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4367 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4368 login_buffer->off_rxcomp_subcrqs =
4369 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4370 sizeof(u64) * adapter->req_tx_queues);
4371 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4372 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4373
4374 tx_list_p = (__be64 *)((char *)login_buffer +
4375 sizeof(struct ibmvnic_login_buffer));
4376 rx_list_p = (__be64 *)((char *)login_buffer +
4377 sizeof(struct ibmvnic_login_buffer) +
4378 sizeof(u64) * adapter->req_tx_queues);
4379
4380 for (i = 0; i < adapter->req_tx_queues; i++) {
4381 if (adapter->tx_scrq[i]) {
4382 tx_list_p[i] =
4383 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4384 }
4385 }
4386
4387 for (i = 0; i < adapter->req_rx_queues; i++) {
4388 if (adapter->rx_scrq[i]) {
4389 rx_list_p[i] =
4390 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4391 }
4392 }
4393
4394
4395 vlcd = (struct vnic_login_client_data *)
4396 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4397 login_buffer->client_data_offset =
4398 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4399 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4400
4401 vnic_add_client_data(adapter, vlcd);
4402
4403 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4404 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4405 netdev_dbg(adapter->netdev, "%016lx\n",
4406 ((unsigned long *)(adapter->login_buf))[i]);
4407 }
4408
4409 memset(&crq, 0, sizeof(crq));
4410 crq.login.first = IBMVNIC_CRQ_CMD;
4411 crq.login.cmd = LOGIN;
4412 crq.login.ioba = cpu_to_be32(buffer_token);
4413 crq.login.len = cpu_to_be32(buffer_size);
4414
4415 adapter->login_pending = true;
4416 rc = ibmvnic_send_crq(adapter, &crq);
4417 if (rc) {
4418 adapter->login_pending = false;
4419 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4420 goto buf_rsp_map_failed;
4421 }
4422
4423 return 0;
4424
4425buf_rsp_map_failed:
4426 kfree(login_rsp_buffer);
4427 adapter->login_rsp_buf = NULL;
4428buf_rsp_alloc_failed:
4429 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4430buf_map_failed:
4431 kfree(login_buffer);
4432 adapter->login_buf = NULL;
4433buf_alloc_failed:
4434 return -ENOMEM;
4435}
4436
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_query_map(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

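/* Query the 25 device capabilities (queue counts, entries per sub-CRQ, MTU
 * bounds, offload and VLAN support, ...). Responses arrive one CRQ at a time
 * and are tallied in handle_query_cap_rsp() via running_cap_crqs.
 */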
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int cap_reqs;

	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
	 * upfront. When the tasklet receives a response to all of these, it
	 * can send out the next protocol message (REQUEST_CAPABILITY).
	 */
	cap_reqs = 25;

	atomic_set(&adapter->running_cap_crqs, cap_reqs);

	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);

	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	/* Keep at end to catch any discrepancy between expected and actual
	 * CRQs sent.
	 */
	WARN_ON(cap_reqs != 0);
}

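/* Ask the device which IP checksum/TSO offloads it supports; the reply is
 * DMA'd into adapter->ip_offload_buf and handled in
 * handle_query_ip_offload_rsp().
 */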
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
		cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}

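/* Enable the subset of the queried offloads that we intend to use, mirror
 * them into netdev->hw_features/features, and send CONTROL_IP_OFFLOAD.
 */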
static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
		      adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
		cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

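/* Parse the VPD response and extract the firmware level string, which
 * follows the ASCII "RM" keyword in the buffer; falls back to "N/A" when no
 * level is provided.
 */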
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extends beyond VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
	complete(&adapter->fw_done);
}

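/* Dump the queried offload capabilities for debugging, then request the ones
 * we will use via send_control_ip_offload().
 */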
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	send_control_ip_offload(adapter);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

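/* Process a REQUEST_CAPABILITY response. On PARTIALSUCCESS, adopt the value
 * the server could grant (or the fallback MTU) and retry; once all
 * outstanding requests complete, move on to the IP offload query.
 */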
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_query_ip_offload(adapter);
}

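/* Validate the login response against what we requested, record the
 * per-queue sub-CRQ handles and the RX buffer size, and complete init_done
 * so the initialization sequence can proceed.
 */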
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	if (adapter->failover_pending) {
		adapter->init_done_rc = -EAGAIN;
		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
		complete(&adapter->init_done);

		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
		   crq->query_map_rsp.page_size,
		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
		   __be32_to_cpu(crq->query_map_rsp.free_pages));
}

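/* Record one queried capability into the adapter structure; when the last
 * outstanding query completes, send our capability requests.
 */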
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_request_cap(adapter, 0);
}

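/* Synchronously query the physical port parameters (speed/duplex), waiting
 * up to 10 seconds for the firmware response under fw_lock.
 */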
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

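/* Main CRQ dispatcher, called from the tasklet: handles transport events
 * (partner init, failover, migration) and routes command responses to their
 * handlers.
 */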
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
				complete(&adapter->init_done);
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}

		/* if we got here during crq-init, retry crq-init */
		if (!completion_done(&adapter->init_done)) {
			adapter->init_done_rc = -EAGAIN;
			complete(&adapter->init_done);
		}

		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

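/* Tasklet scheduled by ibmvnic_interrupt(): drains and dispatches all
 * pending CRQ messages under the queue lock.
 */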
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
		crq->generic.first = 0;
	}

	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

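/* Close and re-register the CRQ with the hypervisor, clearing any queued
 * messages; used for recovery and when H_REG_CRQ returns H_RESOURCE.
 */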
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

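/* Allocate, DMA-map and register the one-page CRQ, then hook up the
 * interrupt and tasklet used to service it.
 */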
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

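/* Drive the CRQ initialization handshake and (re)build the sub-CRQs; on a
 * reset, existing queues are reused unless the requested counts changed.
 */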
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

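/* Probe entry point: allocates the netdev, initializes adapter state and
 * completions, retries CRQ init while the partner reports -EAGAIN, and
 * registers the net device.
 */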
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so mark map_id 0 as always in use */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged, but that's ok - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */
	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	complete(&adapter->probe_done);

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we can
	 * safely call flush_work()
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that they see.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

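/* sysfs "failover" attribute: writing 1 asks the hypervisor to initiate a
 * client failover via H_VIOCTL; if that fails, fall back to scheduling a
 * FAILOVER reset.
 */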
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);

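/* Estimate the IO entitlement (DMA mapping capacity) this device wants: the
 * CRQ page, the statistics buffer, the sub-CRQ pages and the RX pool
 * buffers.
 */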
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);