#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
158
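/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call: registers a sub-CRQ
 * with the firmware and returns the new queue number and interrupt source in
 * *number and *irq.
 */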
159static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
160 unsigned long length, unsigned long *number,
161 unsigned long *irq)
162{
163 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
164 long rc;
165
166 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
167 *number = retbuf[0];
168 *irq = retbuf[1];
169
170 return rc;
171}
172
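/* Allocate a DMA-coherent "long term buffer" and register it with the
 * firmware through send_request_map(), waiting on fw_done for the CRQ
 * response before reporting success.
 */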
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}
207
208static void free_long_term_buff(struct ibmvnic_adapter *adapter,
209 struct ibmvnic_long_term_buff *ltb)
210{
211 struct device *dev = &adapter->vdev->dev;
212
213 if (!ltb->buff)
214 return;
215
216 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
217 adapter->reset_reason != VNIC_RESET_MOBILITY)
218 send_request_unmap(adapter, ltb->map_id);
219 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
220}
221
222static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
223 struct ibmvnic_long_term_buff *ltb)
224{
225 int rc;
226
227 memset(ltb->buff, 0, ltb->size);
228
229 init_completion(&adapter->fw_done);
230 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
231 if (rc)
232 return rc;
233 wait_for_completion(&adapter->fw_done);
234
235 if (adapter->fw_done_rc) {
236 dev_info(&adapter->vdev->dev,
237 "Reset failed, attempting to free and reallocate buffer\n");
238 free_long_term_buff(adapter, ltb);
239 return alloc_long_term_buff(adapter, ltb, ltb->size);
240 }
241 return 0;
242}
243
244static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
245{
246 int i;
247
248 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
249 i++)
250 adapter->rx_pool[i].active = 0;
251}
252
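/* Post receive buffers to the given RX pool. For every free slot we carve a
 * buffer out of the pool's long term buffer, remember the paired skb, and
 * hand the buffer to the firmware with an rx_add sub-CRQ descriptor.
 */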
253static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
254 struct ibmvnic_rx_pool *pool)
255{
256 int count = pool->size - atomic_read(&pool->available);
257 struct device *dev = &adapter->vdev->dev;
258 int buffers_added = 0;
259 unsigned long lpar_rc;
260 union sub_crq sub_crq;
261 struct sk_buff *skb;
262 unsigned int offset;
263 dma_addr_t dma_addr;
264 unsigned char *dst;
265 u64 *handle_array;
266 int shift = 0;
267 int index;
268 int i;
269
270 if (!pool->active)
271 return;
272
273 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
274 be32_to_cpu(adapter->login_rsp_buf->
275 off_rxadd_subcrqs));
276
277 for (i = 0; i < count; ++i) {
278 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
279 if (!skb) {
280 dev_err(dev, "Couldn't replenish rx buff\n");
281 adapter->replenish_no_mem++;
282 break;
283 }
284
285 index = pool->free_map[pool->next_free];
286
287 if (pool->rx_buff[index].skb)
288 dev_err(dev, "Inconsistent free_map!\n");
289
290
291 offset = index * pool->buff_size;
292 dst = pool->long_term_buff.buff + offset;
293 memset(dst, 0, pool->buff_size);
294 dma_addr = pool->long_term_buff.addr + offset;
295 pool->rx_buff[index].data = dst;
296
297 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
298 pool->rx_buff[index].dma = dma_addr;
299 pool->rx_buff[index].skb = skb;
300 pool->rx_buff[index].pool_index = pool->index;
301 pool->rx_buff[index].size = pool->buff_size;
302
303 memset(&sub_crq, 0, sizeof(sub_crq));
304 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
305 sub_crq.rx_add.correlator =
306 cpu_to_be64((u64)&pool->rx_buff[index]);
307 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
308 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
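
		/* The rx_add descriptor's len field holds the buffer length
		 * in a sub-field of the 32-bit word. On little-endian hosts
		 * the value is shifted up a byte before the big-endian
		 * conversion, presumably so it lands in the byte positions
		 * the firmware expects; big-endian hosts need no shift.
		 */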
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
319
320 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
321 &sub_crq);
322 if (lpar_rc != H_SUCCESS)
323 goto failure;
324
325 buffers_added++;
326 adapter->replenish_add_buff_success++;
327 pool->next_free = (pool->next_free + 1) % pool->size;
328 }
329 atomic_add(buffers_added, &pool->available);
330 return;
331
332failure:
333 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
334 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
335 pool->free_map[pool->next_free] = index;
336 pool->rx_buff[index].skb = NULL;
337
338 dev_kfree_skb_any(skb);
339 adapter->replenish_add_buff_failure++;
340 atomic_add(buffers_added, &pool->available);
341
342 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
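		/* The queue is closed or a failover is pending; stop
		 * replenishing this pool and report the carrier as down.
		 */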
348 deactivate_rx_pools(adapter);
349 netif_carrier_off(adapter->netdev);
350 }
351}
352
353static void replenish_pools(struct ibmvnic_adapter *adapter)
354{
355 int i;
356
357 adapter->replenish_task_cycles++;
358 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
359 i++) {
360 if (adapter->rx_pool[i].active)
361 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
362 }
363}
364
365static void release_stats_buffers(struct ibmvnic_adapter *adapter)
366{
367 kfree(adapter->tx_stats_buffers);
368 kfree(adapter->rx_stats_buffers);
369 adapter->tx_stats_buffers = NULL;
370 adapter->rx_stats_buffers = NULL;
371}
372
373static int init_stats_buffers(struct ibmvnic_adapter *adapter)
374{
375 adapter->tx_stats_buffers =
376 kcalloc(IBMVNIC_MAX_QUEUES,
377 sizeof(struct ibmvnic_tx_queue_stats),
378 GFP_KERNEL);
379 if (!adapter->tx_stats_buffers)
380 return -ENOMEM;
381
382 adapter->rx_stats_buffers =
383 kcalloc(IBMVNIC_MAX_QUEUES,
384 sizeof(struct ibmvnic_rx_queue_stats),
385 GFP_KERNEL);
386 if (!adapter->rx_stats_buffers)
387 return -ENOMEM;
388
389 return 0;
390}
391
392static void release_stats_token(struct ibmvnic_adapter *adapter)
393{
394 struct device *dev = &adapter->vdev->dev;
395
396 if (!adapter->stats_token)
397 return;
398
399 dma_unmap_single(dev, adapter->stats_token,
400 sizeof(struct ibmvnic_statistics),
401 DMA_FROM_DEVICE);
402 adapter->stats_token = 0;
403}
404
405static int init_stats_token(struct ibmvnic_adapter *adapter)
406{
407 struct device *dev = &adapter->vdev->dev;
408 dma_addr_t stok;
409
410 stok = dma_map_single(dev, &adapter->stats,
411 sizeof(struct ibmvnic_statistics),
412 DMA_FROM_DEVICE);
413 if (dma_mapping_error(dev, stok)) {
414 dev_err(dev, "Couldn't map stats buffer\n");
415 return -1;
416 }
417
418 adapter->stats_token = stok;
419 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
420 return 0;
421}
422
423static int reset_rx_pools(struct ibmvnic_adapter *adapter)
424{
425 struct ibmvnic_rx_pool *rx_pool;
426 int rx_scrqs;
427 int i, j, rc;
428 u64 *size_array;
429
430 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
431 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
432
433 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
434 for (i = 0; i < rx_scrqs; i++) {
435 rx_pool = &adapter->rx_pool[i];
436
437 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
438
439 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
440 free_long_term_buff(adapter, &rx_pool->long_term_buff);
441 rx_pool->buff_size = be64_to_cpu(size_array[i]);
442 rc = alloc_long_term_buff(adapter,
443 &rx_pool->long_term_buff,
444 rx_pool->size *
445 rx_pool->buff_size);
446 } else {
447 rc = reset_long_term_buff(adapter,
448 &rx_pool->long_term_buff);
449 }
450
451 if (rc)
452 return rc;
453
454 for (j = 0; j < rx_pool->size; j++)
455 rx_pool->free_map[j] = j;
456
457 memset(rx_pool->rx_buff, 0,
458 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
459
460 atomic_set(&rx_pool->available, 0);
461 rx_pool->next_alloc = 0;
462 rx_pool->next_free = 0;
463 rx_pool->active = 1;
464 }
465
466 return 0;
467}
468
469static void release_rx_pools(struct ibmvnic_adapter *adapter)
470{
471 struct ibmvnic_rx_pool *rx_pool;
472 int i, j;
473
474 if (!adapter->rx_pool)
475 return;
476
477 for (i = 0; i < adapter->num_active_rx_pools; i++) {
478 rx_pool = &adapter->rx_pool[i];
479
480 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
481
482 kfree(rx_pool->free_map);
483 free_long_term_buff(adapter, &rx_pool->long_term_buff);
484
485 if (!rx_pool->rx_buff)
486 continue;
487
488 for (j = 0; j < rx_pool->size; j++) {
489 if (rx_pool->rx_buff[j].skb) {
490 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
491 rx_pool->rx_buff[j].skb = NULL;
492 }
493 }
494
495 kfree(rx_pool->rx_buff);
496 }
497
498 kfree(adapter->rx_pool);
499 adapter->rx_pool = NULL;
500 adapter->num_active_rx_pools = 0;
501}
502
503static int init_rx_pools(struct net_device *netdev)
504{
505 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
506 struct device *dev = &adapter->vdev->dev;
507 struct ibmvnic_rx_pool *rx_pool;
508 int rxadd_subcrqs;
509 u64 *size_array;
510 int i, j;
511
512 rxadd_subcrqs =
513 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
514 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
515 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
516
517 adapter->rx_pool = kcalloc(rxadd_subcrqs,
518 sizeof(struct ibmvnic_rx_pool),
519 GFP_KERNEL);
520 if (!adapter->rx_pool) {
521 dev_err(dev, "Failed to allocate rx pools\n");
522 return -1;
523 }
524
525 adapter->num_active_rx_pools = rxadd_subcrqs;
526
527 for (i = 0; i < rxadd_subcrqs; i++) {
528 rx_pool = &adapter->rx_pool[i];
529
530 netdev_dbg(adapter->netdev,
531 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
532 i, adapter->req_rx_add_entries_per_subcrq,
533 be64_to_cpu(size_array[i]));
534
535 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
536 rx_pool->index = i;
537 rx_pool->buff_size = be64_to_cpu(size_array[i]);
538 rx_pool->active = 1;
539
540 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
541 GFP_KERNEL);
542 if (!rx_pool->free_map) {
543 release_rx_pools(adapter);
544 return -1;
545 }
546
547 rx_pool->rx_buff = kcalloc(rx_pool->size,
548 sizeof(struct ibmvnic_rx_buff),
549 GFP_KERNEL);
550 if (!rx_pool->rx_buff) {
551 dev_err(dev, "Couldn't alloc rx buffers\n");
552 release_rx_pools(adapter);
553 return -1;
554 }
555
556 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
557 rx_pool->size * rx_pool->buff_size)) {
558 release_rx_pools(adapter);
559 return -1;
560 }
561
562 for (j = 0; j < rx_pool->size; ++j)
563 rx_pool->free_map[j] = j;
564
565 atomic_set(&rx_pool->available, 0);
566 rx_pool->next_alloc = 0;
567 rx_pool->next_free = 0;
568 }
569
570 return 0;
571}
572
573static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
574 struct ibmvnic_tx_pool *tx_pool)
575{
576 int rc, i;
577
578 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
579 if (rc)
580 return rc;
581
582 memset(tx_pool->tx_buff, 0,
583 tx_pool->num_buffers *
584 sizeof(struct ibmvnic_tx_buff));
585
586 for (i = 0; i < tx_pool->num_buffers; i++)
587 tx_pool->free_map[i] = i;
588
589 tx_pool->consumer_index = 0;
590 tx_pool->producer_index = 0;
591
592 return 0;
593}
594
595static int reset_tx_pools(struct ibmvnic_adapter *adapter)
596{
597 int tx_scrqs;
598 int i, rc;
599
600 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
601 for (i = 0; i < tx_scrqs; i++) {
602 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
603 if (rc)
604 return rc;
605 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
606 if (rc)
607 return rc;
608 }
609
610 return 0;
611}
612
613static void release_vpd_data(struct ibmvnic_adapter *adapter)
614{
615 if (!adapter->vpd)
616 return;
617
618 kfree(adapter->vpd->buff);
619 kfree(adapter->vpd);
620
621 adapter->vpd = NULL;
622}
623
624static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
625 struct ibmvnic_tx_pool *tx_pool)
626{
627 kfree(tx_pool->tx_buff);
628 kfree(tx_pool->free_map);
629 free_long_term_buff(adapter, &tx_pool->long_term_buff);
630}
631
632static void release_tx_pools(struct ibmvnic_adapter *adapter)
633{
634 int i;
635
636 if (!adapter->tx_pool)
637 return;
638
639 for (i = 0; i < adapter->num_active_tx_pools; i++) {
640 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
641 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
642 }
643
644 kfree(adapter->tx_pool);
645 adapter->tx_pool = NULL;
646 kfree(adapter->tso_pool);
647 adapter->tso_pool = NULL;
648 adapter->num_active_tx_pools = 0;
649}
650
651static int init_one_tx_pool(struct net_device *netdev,
652 struct ibmvnic_tx_pool *tx_pool,
653 int num_entries, int buf_size)
654{
655 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
656 int i;
657
658 tx_pool->tx_buff = kcalloc(num_entries,
659 sizeof(struct ibmvnic_tx_buff),
660 GFP_KERNEL);
661 if (!tx_pool->tx_buff)
662 return -1;
663
664 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
665 num_entries * buf_size))
666 return -1;
667
668 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
669 if (!tx_pool->free_map)
670 return -1;
671
672 for (i = 0; i < num_entries; i++)
673 tx_pool->free_map[i] = i;
674
675 tx_pool->consumer_index = 0;
676 tx_pool->producer_index = 0;
677 tx_pool->num_buffers = num_entries;
678 tx_pool->buf_size = buf_size;
679
680 return 0;
681}
682
683static int init_tx_pools(struct net_device *netdev)
684{
685 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
686 int tx_subcrqs;
687 int i, rc;
688
689 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
690 adapter->tx_pool = kcalloc(tx_subcrqs,
691 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
692 if (!adapter->tx_pool)
693 return -1;
694
695 adapter->tso_pool = kcalloc(tx_subcrqs,
696 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
697 if (!adapter->tso_pool)
698 return -1;
699
700 adapter->num_active_tx_pools = tx_subcrqs;
701
702 for (i = 0; i < tx_subcrqs; i++) {
703 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
704 adapter->req_tx_entries_per_subcrq,
705 adapter->req_mtu + VLAN_HLEN);
706 if (rc) {
707 release_tx_pools(adapter);
708 return rc;
709 }
710
711 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
712 IBMVNIC_TSO_BUFS,
713 IBMVNIC_TSO_BUF_SZ);
714 if (rc) {
715 release_tx_pools(adapter);
716 return rc;
717 }
718 }
719
720 return 0;
721}
722
723static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
724{
725 int i;
726
727 if (adapter->napi_enabled)
728 return;
729
730 for (i = 0; i < adapter->req_rx_queues; i++)
731 napi_enable(&adapter->napi[i]);
732
733 adapter->napi_enabled = true;
734}
735
736static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
737{
738 int i;
739
740 if (!adapter->napi_enabled)
741 return;
742
743 for (i = 0; i < adapter->req_rx_queues; i++) {
744 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
745 napi_disable(&adapter->napi[i]);
746 }
747
748 adapter->napi_enabled = false;
749}
750
751static int init_napi(struct ibmvnic_adapter *adapter)
752{
753 int i;
754
755 adapter->napi = kcalloc(adapter->req_rx_queues,
756 sizeof(struct napi_struct), GFP_KERNEL);
757 if (!adapter->napi)
758 return -ENOMEM;
759
760 for (i = 0; i < adapter->req_rx_queues; i++) {
761 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
762 netif_napi_add(adapter->netdev, &adapter->napi[i],
763 ibmvnic_poll, NAPI_POLL_WEIGHT);
764 }
765
766 adapter->num_active_rx_napi = adapter->req_rx_queues;
767 return 0;
768}
769
770static void release_napi(struct ibmvnic_adapter *adapter)
771{
772 int i;
773
774 if (!adapter->napi)
775 return;
776
777 for (i = 0; i < adapter->num_active_rx_napi; i++) {
778 if (&adapter->napi[i]) {
779 netdev_dbg(adapter->netdev,
780 "Releasing napi[%d]\n", i);
781 netif_napi_del(&adapter->napi[i]);
782 }
783 }
784
785 kfree(adapter->napi);
786 adapter->napi = NULL;
787 adapter->num_active_rx_napi = 0;
788 adapter->napi_enabled = false;
789}
790
791static int ibmvnic_login(struct net_device *netdev)
792{
793 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
794 unsigned long timeout = msecs_to_jiffies(30000);
795 int retry_count = 0;
796 bool retry;
797 int rc;
798
799 do {
800 retry = false;
801 if (retry_count > IBMVNIC_MAX_QUEUES) {
802 netdev_warn(netdev, "Login attempts exceeded\n");
803 return -1;
804 }
805
806 adapter->init_done_rc = 0;
807 reinit_completion(&adapter->init_done);
808 rc = send_login(adapter);
809 if (rc) {
810 netdev_warn(netdev, "Unable to login\n");
811 return rc;
812 }
813
814 if (!wait_for_completion_timeout(&adapter->init_done,
815 timeout)) {
816 netdev_warn(netdev, "Login timed out\n");
817 return -1;
818 }
819
820 if (adapter->init_done_rc == PARTIALSUCCESS) {
821 retry_count++;
822 release_sub_crqs(adapter, 1);
823
824 retry = true;
825 netdev_dbg(netdev,
826 "Received partial success, retrying...\n");
827 adapter->init_done_rc = 0;
828 reinit_completion(&adapter->init_done);
829 send_cap_queries(adapter);
830 if (!wait_for_completion_timeout(&adapter->init_done,
831 timeout)) {
832 netdev_warn(netdev,
833 "Capabilities query timed out\n");
834 return -1;
835 }
836
837 rc = init_sub_crqs(adapter);
838 if (rc) {
839 netdev_warn(netdev,
840 "SCRQ initialization failed\n");
841 return -1;
842 }
843
844 rc = init_sub_crq_irqs(adapter);
845 if (rc) {
846 netdev_warn(netdev,
847 "SCRQ irq initialization failed\n");
848 return -1;
849 }
850 } else if (adapter->init_done_rc) {
851 netdev_warn(netdev, "Adapter login failed\n");
852 return -1;
853 }
854 } while (retry);
855
856 __ibmvnic_set_mac(netdev, adapter->mac_addr);
857
858 return 0;
859}
860
861static void release_login_buffer(struct ibmvnic_adapter *adapter)
862{
863 kfree(adapter->login_buf);
864 adapter->login_buf = NULL;
865}
866
867static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
868{
869 kfree(adapter->login_rsp_buf);
870 adapter->login_rsp_buf = NULL;
871}
872
873static void release_resources(struct ibmvnic_adapter *adapter)
874{
875 release_vpd_data(adapter);
876
877 release_tx_pools(adapter);
878 release_rx_pools(adapter);
879
880 release_napi(adapter);
881 release_login_rsp_buffer(adapter);
882}
883
884static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
885{
886 struct net_device *netdev = adapter->netdev;
887 unsigned long timeout = msecs_to_jiffies(30000);
888 union ibmvnic_crq crq;
889 bool resend;
890 int rc;
891
892 netdev_dbg(netdev, "setting link state %d\n", link_state);
893
894 memset(&crq, 0, sizeof(crq));
895 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
896 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
897 crq.logical_link_state.link_state = link_state;
898
899 do {
900 resend = false;
901
902 reinit_completion(&adapter->init_done);
903 rc = ibmvnic_send_crq(adapter, &crq);
904 if (rc) {
905 netdev_err(netdev, "Failed to set link state\n");
906 return rc;
907 }
908
909 if (!wait_for_completion_timeout(&adapter->init_done,
910 timeout)) {
911 netdev_err(netdev, "timeout setting link state\n");
912 return -1;
913 }
914
915 if (adapter->init_done_rc == 1) {
916
917 mdelay(1000);
918 resend = true;
919 } else if (adapter->init_done_rc) {
920 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
921 adapter->init_done_rc);
922 return adapter->init_done_rc;
923 }
924 } while (resend);
925
926 return 0;
927}
928
929static int set_real_num_queues(struct net_device *netdev)
930{
931 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
932 int rc;
933
934 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
935 adapter->req_tx_queues, adapter->req_rx_queues);
936
937 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
938 if (rc) {
939 netdev_err(netdev, "failed to set the number of tx queues\n");
940 return rc;
941 }
942
943 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
944 if (rc)
945 netdev_err(netdev, "failed to set the number of rx queues\n");
946
947 return rc;
948}
949
950static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
951{
952 struct device *dev = &adapter->vdev->dev;
953 union ibmvnic_crq crq;
954 int len = 0;
955 int rc;
956
957 if (adapter->vpd->buff)
958 len = adapter->vpd->len;
959
960 init_completion(&adapter->fw_done);
961 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
962 crq.get_vpd_size.cmd = GET_VPD_SIZE;
963 rc = ibmvnic_send_crq(adapter, &crq);
964 if (rc)
965 return rc;
966 wait_for_completion(&adapter->fw_done);
967
968 if (!adapter->vpd->len)
969 return -ENODATA;
970
971 if (!adapter->vpd->buff)
972 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
973 else if (adapter->vpd->len != len)
974 adapter->vpd->buff =
975 krealloc(adapter->vpd->buff,
976 adapter->vpd->len, GFP_KERNEL);
977
978 if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
980 return -ENOMEM;
981 }
982
983 adapter->vpd->dma_addr =
984 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
985 DMA_FROM_DEVICE);
986 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
987 dev_err(dev, "Could not map VPD buffer\n");
988 kfree(adapter->vpd->buff);
989 adapter->vpd->buff = NULL;
990 return -ENOMEM;
991 }
992
993 reinit_completion(&adapter->fw_done);
994 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
995 crq.get_vpd.cmd = GET_VPD;
996 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
997 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
998 rc = ibmvnic_send_crq(adapter, &crq);
999 if (rc) {
1000 kfree(adapter->vpd->buff);
1001 adapter->vpd->buff = NULL;
1002 return rc;
1003 }
1004 wait_for_completion(&adapter->fw_done);
1005
1006 return 0;
1007}
1008
1009static int init_resources(struct ibmvnic_adapter *adapter)
1010{
1011 struct net_device *netdev = adapter->netdev;
1012 int rc;
1013
1014 rc = set_real_num_queues(netdev);
1015 if (rc)
1016 return rc;
1017
1018 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1019 if (!adapter->vpd)
1020 return -ENOMEM;
1021
1022
1023 rc = ibmvnic_get_vpd(adapter);
1024 if (rc) {
1025 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1026 return rc;
1027 }
1028
1029 adapter->map_id = 1;
1030
1031 rc = init_napi(adapter);
1032 if (rc)
1033 return rc;
1034
1035 send_map_query(adapter);
1036
1037 rc = init_rx_pools(netdev);
1038 if (rc)
1039 return rc;
1040
1041 rc = init_tx_pools(netdev);
1042 return rc;
1043}
1044
1045static int __ibmvnic_open(struct net_device *netdev)
1046{
1047 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1048 enum vnic_state prev_state = adapter->state;
1049 int i, rc;
1050
1051 adapter->state = VNIC_OPENING;
1052 replenish_pools(adapter);
1053 ibmvnic_napi_enable(adapter);
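
	/* Enable the sub-CRQ interrupts. The Linux IRQs themselves only need
	 * to be re-enabled when coming out of the CLOSED state, where they
	 * were left disabled.
	 */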
1058 for (i = 0; i < adapter->req_rx_queues; i++) {
1059 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1060 if (prev_state == VNIC_CLOSED)
1061 enable_irq(adapter->rx_scrq[i]->irq);
1062 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1063 }
1064
1065 for (i = 0; i < adapter->req_tx_queues; i++) {
1066 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1067 if (prev_state == VNIC_CLOSED)
1068 enable_irq(adapter->tx_scrq[i]->irq);
1069 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1070 }
1071
1072 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1073 if (rc) {
1074 for (i = 0; i < adapter->req_rx_queues; i++)
1075 napi_disable(&adapter->napi[i]);
1076 release_resources(adapter);
1077 return rc;
1078 }
1079
1080 netif_tx_start_all_queues(netdev);
1081
1082 if (prev_state == VNIC_CLOSED) {
1083 for (i = 0; i < adapter->req_rx_queues; i++)
1084 napi_schedule(&adapter->napi[i]);
1085 }
1086
1087 adapter->state = VNIC_OPEN;
1088 return rc;
1089}
1090
1091static int ibmvnic_open(struct net_device *netdev)
1092{
1093 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1094 int rc;
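
	/* If a device failover is pending, just mark the interface open; the
	 * failover/reset path will finish bringing the adapter up.
	 */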
1099 if (adapter->failover_pending) {
1100 adapter->state = VNIC_OPEN;
1101 return 0;
1102 }
1103
1104 if (adapter->state != VNIC_CLOSED) {
1105 rc = ibmvnic_login(netdev);
1106 if (rc)
1107 return rc;
1108
1109 rc = init_resources(adapter);
1110 if (rc) {
1111 netdev_err(netdev, "failed to initialize resources\n");
1112 release_resources(adapter);
1113 return rc;
1114 }
1115 }
1116
1117 rc = __ibmvnic_open(netdev);
1118
1119 return rc;
1120}
1121
1122static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1123{
1124 struct ibmvnic_rx_pool *rx_pool;
1125 struct ibmvnic_rx_buff *rx_buff;
1126 u64 rx_entries;
1127 int rx_scrqs;
1128 int i, j;
1129
1130 if (!adapter->rx_pool)
1131 return;
1132
1133 rx_scrqs = adapter->num_active_rx_pools;
1134 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1135
1136
1137 for (i = 0; i < rx_scrqs; i++) {
1138 rx_pool = &adapter->rx_pool[i];
1139 if (!rx_pool || !rx_pool->rx_buff)
1140 continue;
1141
1142 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1143 for (j = 0; j < rx_entries; j++) {
1144 rx_buff = &rx_pool->rx_buff[j];
1145 if (rx_buff && rx_buff->skb) {
1146 dev_kfree_skb_any(rx_buff->skb);
1147 rx_buff->skb = NULL;
1148 }
1149 }
1150 }
1151}
1152
1153static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1154 struct ibmvnic_tx_pool *tx_pool)
1155{
1156 struct ibmvnic_tx_buff *tx_buff;
1157 u64 tx_entries;
1158 int i;
1159
1160 if (!tx_pool || !tx_pool->tx_buff)
1161 return;
1162
1163 tx_entries = tx_pool->num_buffers;
1164
1165 for (i = 0; i < tx_entries; i++) {
1166 tx_buff = &tx_pool->tx_buff[i];
1167 if (tx_buff && tx_buff->skb) {
1168 dev_kfree_skb_any(tx_buff->skb);
1169 tx_buff->skb = NULL;
1170 }
1171 }
1172}
1173
1174static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1175{
1176 int tx_scrqs;
1177 int i;
1178
1179 if (!adapter->tx_pool || !adapter->tso_pool)
1180 return;
1181
1182 tx_scrqs = adapter->num_active_tx_pools;
1183
1184
1185 for (i = 0; i < tx_scrqs; i++) {
1186 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1187 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1188 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1189 }
1190}
1191
1192static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1193{
1194 struct net_device *netdev = adapter->netdev;
1195 int i;
1196
1197 if (adapter->tx_scrq) {
1198 for (i = 0; i < adapter->req_tx_queues; i++)
1199 if (adapter->tx_scrq[i]->irq) {
1200 netdev_dbg(netdev,
1201 "Disabling tx_scrq[%d] irq\n", i);
1202 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1203 disable_irq(adapter->tx_scrq[i]->irq);
1204 }
1205 }
1206
1207 if (adapter->rx_scrq) {
1208 for (i = 0; i < adapter->req_rx_queues; i++) {
1209 if (adapter->rx_scrq[i]->irq) {
1210 netdev_dbg(netdev,
1211 "Disabling rx_scrq[%d] irq\n", i);
1212 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1213 disable_irq(adapter->rx_scrq[i]->irq);
1214 }
1215 }
1216 }
1217}
1218
1219static void ibmvnic_cleanup(struct net_device *netdev)
1220{
1221 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1222
1224 if (test_bit(0, &adapter->resetting))
1225 netif_tx_disable(netdev);
1226 else
1227 netif_tx_stop_all_queues(netdev);
1228
1229 ibmvnic_napi_disable(adapter);
1230 ibmvnic_disable_irqs(adapter);
1231
1232 clean_rx_pools(adapter);
1233 clean_tx_pools(adapter);
1234}
1235
1236static int __ibmvnic_close(struct net_device *netdev)
1237{
1238 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1239 int rc = 0;
1240
1241 adapter->state = VNIC_CLOSING;
1242 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1243 if (rc)
1244 return rc;
1245 adapter->state = VNIC_CLOSED;
1246 return 0;
1247}
1248
1249static int ibmvnic_close(struct net_device *netdev)
1250{
1251 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1252 int rc;
1253
1254
1255
1256
1257 if (adapter->failover_pending) {
1258 adapter->state = VNIC_CLOSED;
1259 return 0;
1260 }
1261
1262 rc = __ibmvnic_close(netdev);
1263 ibmvnic_cleanup(netdev);
1264
1265 return rc;
1266}
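
/**
 * build_hdr_data - copy an skb's L2/L3/L4 headers into a flat buffer
 * @hdr_field: bit field describing which headers the firmware wants
 * @skb: socket buffer being transmitted
 * @hdr_len: filled in with the individual L2/L3/L4 header lengths
 * @hdr_data: destination buffer (at least 120 bytes)
 *
 * Returns the total number of header bytes copied into @hdr_data.
 */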
1279static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1280 int *hdr_len, u8 *hdr_data)
1281{
1282 int len = 0;
1283 u8 *hdr;
1284
1285 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1286 hdr_len[0] = sizeof(struct vlan_ethhdr);
1287 else
1288 hdr_len[0] = sizeof(struct ethhdr);
1289
1290 if (skb->protocol == htons(ETH_P_IP)) {
1291 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1292 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1293 hdr_len[2] = tcp_hdrlen(skb);
1294 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1295 hdr_len[2] = sizeof(struct udphdr);
1296 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1297 hdr_len[1] = sizeof(struct ipv6hdr);
1298 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1299 hdr_len[2] = tcp_hdrlen(skb);
1300 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1301 hdr_len[2] = sizeof(struct udphdr);
1302 } else if (skb->protocol == htons(ETH_P_ARP)) {
1303 hdr_len[1] = arp_hdr_len(skb->dev);
1304 hdr_len[2] = 0;
1305 }
1306
1307 memset(hdr_data, 0, 120);
1308 if ((hdr_field >> 6) & 1) {
1309 hdr = skb_mac_header(skb);
1310 memcpy(hdr_data, hdr, hdr_len[0]);
1311 len += hdr_len[0];
1312 }
1313
1314 if ((hdr_field >> 5) & 1) {
1315 hdr = skb_network_header(skb);
1316 memcpy(hdr_data + len, hdr, hdr_len[1]);
1317 len += hdr_len[1];
1318 }
1319
1320 if ((hdr_field >> 4) & 1) {
1321 hdr = skb_transport_header(skb);
1322 memcpy(hdr_data + len, hdr, hdr_len[2]);
1323 len += hdr_len[2];
1324 }
1325 return len;
1326}
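
/**
 * create_hdr_descs - convert a flat header buffer into TX descriptors
 * @hdr_field: bit field describing which headers are present
 * @hdr_data: buffer holding the packet headers
 * @len: number of valid bytes in @hdr_data
 * @hdr_len: individual L2/L3/L4 header lengths
 * @scrq_arr: descriptor array to fill
 *
 * The first 24 bytes go into a header descriptor; any remainder is split
 * across 29-byte header-extension descriptors. Returns the number of
 * descriptors written to @scrq_arr.
 */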
1340static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1341 union sub_crq *scrq_arr)
1342{
1343 union sub_crq hdr_desc;
1344 int tmp_len = len;
1345 int num_descs = 0;
1346 u8 *data, *cur;
1347 int tmp;
1348
1349 while (tmp_len > 0) {
1350 cur = hdr_data + len - tmp_len;
1351
1352 memset(&hdr_desc, 0, sizeof(hdr_desc));
1353 if (cur != hdr_data) {
1354 data = hdr_desc.hdr_ext.data;
1355 tmp = tmp_len > 29 ? 29 : tmp_len;
1356 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1357 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1358 hdr_desc.hdr_ext.len = tmp;
1359 } else {
1360 data = hdr_desc.hdr.data;
1361 tmp = tmp_len > 24 ? 24 : tmp_len;
1362 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1363 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1364 hdr_desc.hdr.len = tmp;
1365 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1366 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1367 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1368 hdr_desc.hdr.flag = hdr_field << 1;
1369 }
1370 memcpy(data, cur, tmp);
1371 tmp_len -= tmp;
1372 *scrq_arr = hdr_desc;
1373 scrq_arr++;
1374 num_descs++;
1375 }
1376
1377 return num_descs;
1378}
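
/**
 * build_hdr_descs_arr - build the header descriptors for a TX buffer
 * @txbuff: TX buffer whose skb headers are being described
 * @num_entries: incremented by the number of descriptors added
 * @hdr_field: bit field describing which headers the firmware wants
 *
 * Combines build_hdr_data() and create_hdr_descs(), placing the resulting
 * descriptors after the first entry of txbuff->indir_arr.
 */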
1391static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1392 int *num_entries, u8 hdr_field)
1393{
1394 int hdr_len[3] = {0, 0, 0};
1395 int tot_len;
1396 u8 *hdr_data = txbuff->hdr_data;
1397
1398 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1399 txbuff->hdr_data);
1400 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1401 txbuff->indir_arr + 1);
1402}
1403
1404static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1405 struct net_device *netdev)
1406{
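	/* Frames shorter than the device minimum MTU are padded out before
	 * being handed to the firmware; some backing devices do not cope
	 * well with undersized frames.
	 */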
1413 if (skb->len < netdev->extended->min_mtu)
1414 return skb_put_padto(skb, netdev->extended->min_mtu);
1415
1416 return 0;
1417}
1418
1419static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1420{
1421 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1422 int queue_num = skb_get_queue_mapping(skb);
1423 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1424 struct device *dev = &adapter->vdev->dev;
1425 struct ibmvnic_tx_buff *tx_buff = NULL;
1426 struct ibmvnic_sub_crq_queue *tx_scrq;
1427 struct ibmvnic_tx_pool *tx_pool;
1428 unsigned int tx_send_failed = 0;
1429 unsigned int tx_map_failed = 0;
1430 unsigned int tx_dropped = 0;
1431 unsigned int tx_packets = 0;
1432 unsigned int tx_bytes = 0;
1433 dma_addr_t data_dma_addr;
1434 struct netdev_queue *txq;
1435 unsigned long lpar_rc;
1436 union sub_crq tx_crq;
1437 unsigned int offset;
1438 int num_entries = 1;
1439 unsigned char *dst;
1440 u64 *handle_array;
1441 int index = 0;
1442 u8 proto = 0;
1443 int ret = 0;
1444
1445 if (test_bit(0, &adapter->resetting)) {
1446 if (!netif_subqueue_stopped(netdev, skb))
1447 netif_stop_subqueue(netdev, queue_num);
1448 dev_kfree_skb_any(skb);
1449
1450 tx_send_failed++;
1451 tx_dropped++;
1452 ret = NETDEV_TX_OK;
1453 goto out;
1454 }
1455
1456 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1457 tx_dropped++;
1458 tx_send_failed++;
1459 ret = NETDEV_TX_OK;
1460 goto out;
1461 }
1462 if (skb_is_gso(skb))
1463 tx_pool = &adapter->tso_pool[queue_num];
1464 else
1465 tx_pool = &adapter->tx_pool[queue_num];
1466
1467 tx_scrq = adapter->tx_scrq[queue_num];
1468 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1469 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1470 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1471
1472 index = tx_pool->free_map[tx_pool->consumer_index];
1473
1474 if (index == IBMVNIC_INVALID_MAP) {
1475 dev_kfree_skb_any(skb);
1476 tx_send_failed++;
1477 tx_dropped++;
1478 ret = NETDEV_TX_OK;
1479 goto out;
1480 }
1481
1482 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1483
1484 offset = index * tx_pool->buf_size;
1485 dst = tx_pool->long_term_buff.buff + offset;
1486 memset(dst, 0, tx_pool->buf_size);
1487 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1488
1489 if (skb_shinfo(skb)->nr_frags) {
1490 int cur, i;
1491
1492
1493 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1494 cur = skb_headlen(skb);
1495
1496
1497 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1498 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1499
1500 memcpy(dst + cur,
1501 page_address(skb_frag_page(frag)) +
1502 frag->page_offset, skb_frag_size(frag));
1503 cur += skb_frag_size(frag);
1504 }
1505 } else {
1506 skb_copy_from_linear_data(skb, dst, skb->len);
1507 }
1508
1509 tx_pool->consumer_index =
1510 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1511
1512 tx_buff = &tx_pool->tx_buff[index];
1513 tx_buff->skb = skb;
1514 tx_buff->data_dma[0] = data_dma_addr;
1515 tx_buff->data_len[0] = skb->len;
1516 tx_buff->index = index;
1517 tx_buff->pool_index = queue_num;
1518 tx_buff->last_frag = true;
1519
1520 memset(&tx_crq, 0, sizeof(tx_crq));
1521 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1522 tx_crq.v1.type = IBMVNIC_TX_DESC;
1523 tx_crq.v1.n_crq_elem = 1;
1524 tx_crq.v1.n_sge = 1;
1525 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1526
1527 if (skb_is_gso(skb))
1528 tx_crq.v1.correlator =
1529 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1530 else
1531 tx_crq.v1.correlator = cpu_to_be32(index);
1532 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1533 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1534 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1535
1536 if (adapter->vlan_header_insertion) {
1537 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1538 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1539 }
1540
1541 if (skb->protocol == htons(ETH_P_IP)) {
1542 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1543 proto = ip_hdr(skb)->protocol;
1544 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1545 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1546 proto = ipv6_hdr(skb)->nexthdr;
1547 }
1548
1549 if (proto == IPPROTO_TCP)
1550 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1551 else if (proto == IPPROTO_UDP)
1552 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1553
1554 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1555 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1556 hdrs += 2;
1557 }
1558 if (skb_is_gso(skb)) {
1559 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1560 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1561 hdrs += 2;
1562 }
1563
1564 if ((*hdrs >> 7) & 1) {
1565 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1566 tx_crq.v1.n_crq_elem = num_entries;
1567 tx_buff->num_entries = num_entries;
1568 tx_buff->indir_arr[0] = tx_crq;
1569 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1570 sizeof(tx_buff->indir_arr),
1571 DMA_TO_DEVICE);
1572 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1573 dev_kfree_skb_any(skb);
1574 tx_buff->skb = NULL;
1575 if (!firmware_has_feature(FW_FEATURE_CMO))
1576 dev_err(dev, "tx: unable to map descriptor array\n");
1577 tx_map_failed++;
1578 tx_dropped++;
1579 ret = NETDEV_TX_OK;
1580 goto tx_err_out;
1581 }
1582 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1583 (u64)tx_buff->indir_dma,
1584 (u64)num_entries);
1585 dma_unmap_single(dev, tx_buff->indir_dma,
1586 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1587 } else {
1588 tx_buff->num_entries = num_entries;
1589 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1590 &tx_crq);
1591 }
1592 if (lpar_rc != H_SUCCESS) {
1593 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1594 dev_err_ratelimited(dev, "tx: send failed\n");
1595 dev_kfree_skb_any(skb);
1596 tx_buff->skb = NULL;
1597
1598 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
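		/* The queue is closed or a failover is pending: stop all TX
		 * queues and report the carrier as down.
		 */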
1604 netif_tx_stop_all_queues(netdev);
1605 netif_carrier_off(netdev);
1606 }
1607
1608 tx_send_failed++;
1609 tx_dropped++;
1610 ret = NETDEV_TX_OK;
1611 goto tx_err_out;
1612 }
1613
1614 if (atomic_add_return(num_entries, &tx_scrq->used)
1615 >= adapter->req_tx_entries_per_subcrq) {
1616 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1617 netif_stop_subqueue(netdev, queue_num);
1618 }
1619
1620 tx_packets++;
1621 tx_bytes += skb->len;
1622 txq->trans_start = jiffies;
1623 ret = NETDEV_TX_OK;
1624 goto out;
1625
1626tx_err_out:
1627
1628 if (tx_pool->consumer_index == 0)
1629 tx_pool->consumer_index =
1630 tx_pool->num_buffers - 1;
1631 else
1632 tx_pool->consumer_index--;
1633 tx_pool->free_map[tx_pool->consumer_index] = index;
1634out:
1635 netdev->stats.tx_dropped += tx_dropped;
1636 netdev->stats.tx_bytes += tx_bytes;
1637 netdev->stats.tx_packets += tx_packets;
1638 adapter->tx_send_failed += tx_send_failed;
1639 adapter->tx_map_failed += tx_map_failed;
1640 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1641 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1642 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1643
1644 return ret;
1645}
1646
1647static void ibmvnic_set_multi(struct net_device *netdev)
1648{
1649 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1650 struct netdev_hw_addr *ha;
1651 union ibmvnic_crq crq;
1652
1653 memset(&crq, 0, sizeof(crq));
1654 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1655 crq.request_capability.cmd = REQUEST_CAPABILITY;
1656
1657 if (netdev->flags & IFF_PROMISC) {
1658 if (!adapter->promisc_supported)
1659 return;
1660 } else {
1661 if (netdev->flags & IFF_ALLMULTI) {
1662
1663 memset(&crq, 0, sizeof(crq));
1664 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1665 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1666 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1667 ibmvnic_send_crq(adapter, &crq);
1668 } else if (netdev_mc_empty(netdev)) {
1669
1670 memset(&crq, 0, sizeof(crq));
1671 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1672 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1673 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1674 ibmvnic_send_crq(adapter, &crq);
1675 } else {
1676
1677 netdev_for_each_mc_addr(ha, netdev) {
1678 memset(&crq, 0, sizeof(crq));
1679 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1680 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1681 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1682 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1683 ha->addr);
1684 ibmvnic_send_crq(adapter, &crq);
1685 }
1686 }
1687 }
1688}
1689
1690static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1691{
1692 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1693 union ibmvnic_crq crq;
1694 int rc;
1695
1696 if (!is_valid_ether_addr(dev_addr)) {
1697 rc = -EADDRNOTAVAIL;
1698 goto err;
1699 }
1700
1701 memset(&crq, 0, sizeof(crq));
1702 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1703 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1704 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1705
1706 init_completion(&adapter->fw_done);
1707 rc = ibmvnic_send_crq(adapter, &crq);
1708 if (rc) {
1709 rc = -EIO;
1710 goto err;
1711 }
1712
1713 wait_for_completion(&adapter->fw_done);
1714
1715 if (adapter->fw_done_rc) {
1716 rc = -EIO;
1717 goto err;
1718 }
1719
1720 return 0;
1721err:
1722 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1723 return rc;
1724}
1725
1726static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1727{
1728 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1729 struct sockaddr *addr = p;
1730 int rc;
1731
1732 rc = 0;
1733 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1734 if (adapter->state != VNIC_PROBED)
1735 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1736
1737 return rc;
1738}
1739
1740
1741
1742
1743
1744static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1745 struct ibmvnic_rwi *rwi,
1746 u32 reset_state)
1747{
1748 struct net_device *netdev = adapter->netdev;
1749 int i, rc;
1750
1751 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1752 rwi->reset_reason);
1753
1754 netif_carrier_off(netdev);
1755 adapter->reset_reason = rwi->reset_reason;
1756
1757 ibmvnic_cleanup(netdev);
1758
1759 if (reset_state == VNIC_OPEN) {
1760 rc = __ibmvnic_close(netdev);
1761 if (rc)
1762 return rc;
1763 }
1764
1765 release_resources(adapter);
1766 release_sub_crqs(adapter, 1);
1767 release_crq_queue(adapter);
1768
1769 adapter->state = VNIC_PROBED;
1770
1771 rc = init_crq_queue(adapter);
1772
1773 if (rc) {
1774 netdev_err(adapter->netdev,
1775 "Couldn't initialize crq. rc=%d\n", rc);
1776 return rc;
1777 }
1778
1779 rc = ibmvnic_reset_init(adapter);
1780 if (rc)
1781 return IBMVNIC_INIT_FAILED;
1782
1783
1784
1785
1786 if (reset_state == VNIC_PROBED)
1787 return 0;
1788
1789 rc = ibmvnic_login(netdev);
1790 if (rc) {
1791 adapter->state = reset_state;
1792 return rc;
1793 }
1794
1795 rc = init_resources(adapter);
1796 if (rc)
1797 return rc;
1798
1799 ibmvnic_disable_irqs(adapter);
1800
1801 adapter->state = VNIC_CLOSED;
1802
1803 if (reset_state == VNIC_CLOSED)
1804 return 0;
1805
1806 rc = __ibmvnic_open(netdev);
1807 if (rc)
1808 return IBMVNIC_OPEN_FAILED;
1809
1810
1811 ibmvnic_set_multi(netdev);
1812
1813
1814 for (i = 0; i < adapter->req_rx_queues; i++)
1815 napi_schedule(&adapter->napi[i]);
1816
1817 return 0;
1818}
1819
1820
1821
1822
1823
1824static int do_reset(struct ibmvnic_adapter *adapter,
1825 struct ibmvnic_rwi *rwi, u32 reset_state)
1826{
1827 u64 old_num_rx_queues, old_num_tx_queues;
1828 u64 old_num_rx_slots, old_num_tx_slots;
1829 struct net_device *netdev = adapter->netdev;
1830 int i, rc;
1831
1832 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1833 rwi->reset_reason);
1834
1835 rtnl_lock();
1836
1837 netif_carrier_off(netdev);
1838 adapter->reset_reason = rwi->reset_reason;
1839
1840 old_num_rx_queues = adapter->req_rx_queues;
1841 old_num_tx_queues = adapter->req_tx_queues;
1842 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1843 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1844
1845 ibmvnic_cleanup(netdev);
1846
1847 if (reset_state == VNIC_OPEN &&
1848 adapter->reset_reason != VNIC_RESET_MOBILITY &&
1849 adapter->reset_reason != VNIC_RESET_FAILOVER) {
1850 adapter->state = VNIC_CLOSING;
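
		/* Drop the RTNL lock around the link-state change (which
		 * blocks waiting for the firmware response) and re-acquire
		 * it for the rest of the reset.
		 */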
1857 rtnl_unlock();
1858 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1859 rtnl_lock();
1860 if (rc)
1861 goto out;
1862
1863 if (adapter->state != VNIC_CLOSING) {
1864 rc = -1;
1865 goto out;
1866 }
1867
1868 adapter->state = VNIC_CLOSED;
1869 }
1870
1871 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1872
1873
1874
1875 adapter->state = VNIC_PROBED;
1876
1877 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1878 rc = ibmvnic_reenable_crq_queue(adapter);
1879 release_sub_crqs(adapter, 1);
1880 } else {
1881 rc = ibmvnic_reset_crq(adapter);
1882 if (!rc)
1883 rc = vio_enable_interrupts(adapter->vdev);
1884 }
1885
1886 if (rc) {
1887 netdev_err(adapter->netdev,
1888 "Couldn't initialize crq. rc=%d\n", rc);
1889 goto out;
1890 }
1891
1892 rc = ibmvnic_reset_init(adapter);
1893 if (rc) {
1894 rc = IBMVNIC_INIT_FAILED;
1895 goto out;
1896 }
1897
1898
1899
1900
1901 if (reset_state == VNIC_PROBED) {
1902 rc = 0;
1903 goto out;
1904 }
1905
1906 rc = ibmvnic_login(netdev);
1907 if (rc) {
1908 adapter->state = reset_state;
1909 goto out;
1910 }
1911
1912 if (adapter->req_rx_queues != old_num_rx_queues ||
1913 adapter->req_tx_queues != old_num_tx_queues ||
1914 adapter->req_rx_add_entries_per_subcrq !=
1915 old_num_rx_slots ||
1916 adapter->req_tx_entries_per_subcrq !=
1917 old_num_tx_slots) {
1918 release_rx_pools(adapter);
1919 release_tx_pools(adapter);
1920 release_napi(adapter);
1921 release_vpd_data(adapter);
1922
1923 rc = init_resources(adapter);
1924 if (rc)
1925 goto out;
1926
1927 } else {
1928 rc = reset_tx_pools(adapter);
1929 if (rc)
1930 goto out;
1931
1932 rc = reset_rx_pools(adapter);
1933 if (rc)
1934 goto out;
1935 }
1936 ibmvnic_disable_irqs(adapter);
1937 }
1938 adapter->state = VNIC_CLOSED;
1939
1940 if (reset_state == VNIC_CLOSED) {
1941 rc = 0;
1942 goto out;
1943 }
1944
1945 rc = __ibmvnic_open(netdev);
1946 if (rc) {
1947 rc = IBMVNIC_OPEN_FAILED;
1948 goto out;
1949 }
1950
1951
1952 ibmvnic_set_multi(netdev);
1953
1954
1955 for (i = 0; i < adapter->req_rx_queues; i++)
1956 napi_schedule(&adapter->napi[i]);
1957
1958 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1959 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
1960
1961 rc = 0;
1962
1963out:
1964 rtnl_unlock();
1965
1966 return rc;
1967}
1968
1969static int do_hard_reset(struct ibmvnic_adapter *adapter,
1970 struct ibmvnic_rwi *rwi, u32 reset_state)
1971{
1972 struct net_device *netdev = adapter->netdev;
1973 int rc;
1974
1975 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1976 rwi->reset_reason);
1977
1978 netif_carrier_off(netdev);
1979 adapter->reset_reason = rwi->reset_reason;
1980
1981 ibmvnic_cleanup(netdev);
1982 release_resources(adapter);
1983 release_sub_crqs(adapter, 0);
1984 release_crq_queue(adapter);
1985
1986
1987
1988
1989 adapter->state = VNIC_PROBED;
1990
1991 reinit_completion(&adapter->init_done);
1992 rc = init_crq_queue(adapter);
1993 if (rc) {
1994 netdev_err(adapter->netdev,
1995 "Couldn't initialize crq. rc=%d\n", rc);
1996 return rc;
1997 }
1998
1999 rc = ibmvnic_init(adapter);
2000 if (rc)
2001 return rc;
2002
2003
2004
2005
2006 if (reset_state == VNIC_PROBED)
2007 return 0;
2008
2009 rc = ibmvnic_login(netdev);
2010 if (rc) {
2011 adapter->state = VNIC_PROBED;
2012 return 0;
2013 }
2014
2015 rc = init_resources(adapter);
2016 if (rc)
2017 return rc;
2018
2019 ibmvnic_disable_irqs(adapter);
2020 adapter->state = VNIC_CLOSED;
2021
2022 if (reset_state == VNIC_CLOSED)
2023 return 0;
2024
2025 rc = __ibmvnic_open(netdev);
2026 if (rc)
2027 return IBMVNIC_OPEN_FAILED;
2028
2029 return 0;
2030}
2031
2032static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2033{
2034 struct ibmvnic_rwi *rwi;
2035 unsigned long flags;
2036
2037 spin_lock_irqsave(&adapter->rwi_lock, flags);
2038
2039 if (!list_empty(&adapter->rwi_list)) {
2040 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2041 list);
2042 list_del(&rwi->list);
2043 } else {
2044 rwi = NULL;
2045 }
2046
2047 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2048 return rwi;
2049}
2050
2051static void free_all_rwi(struct ibmvnic_adapter *adapter)
2052{
2053 struct ibmvnic_rwi *rwi;
2054
2055 rwi = get_next_rwi(adapter);
2056 while (rwi) {
2057 kfree(rwi);
2058 rwi = get_next_rwi(adapter);
2059 }
2060}
2061
2062static void __ibmvnic_reset(struct work_struct *work)
2063{
2064 struct ibmvnic_rwi *rwi;
2065 struct ibmvnic_adapter *adapter;
2066 struct net_device *netdev;
2067 u32 reset_state;
2068 int rc = 0;
2069
2070 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2071 netdev = adapter->netdev;
2072
2073 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2074 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2075 IBMVNIC_RESET_DELAY);
2076 return;
2077 }
2078
2079 reset_state = adapter->state;
2080
2081 rwi = get_next_rwi(adapter);
2082 while (rwi) {
2083 if (adapter->state == VNIC_REMOVING ||
2084 adapter->state == VNIC_REMOVED) {
2085 kfree(rwi);
2086 rc = EBUSY;
2087 break;
2088 }
2089
2090 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2091
2092 rc = do_change_param_reset(adapter, rwi, reset_state);
2093 } else if (adapter->force_reset_recovery) {
2094
2095 if (adapter->wait_for_reset) {
2096
2097 adapter->force_reset_recovery = false;
2098 rc = do_hard_reset(adapter, rwi, reset_state);
2099 } else {
2100 rtnl_lock();
2101 adapter->force_reset_recovery = false;
2102 rc = do_hard_reset(adapter, rwi, reset_state);
2103 rtnl_unlock();
2104 }
2105 } else {
2106 rc = do_reset(adapter, rwi, reset_state);
2107 }
2108 kfree(rwi);
2109 if (rc == IBMVNIC_OPEN_FAILED) {
2110 if (list_empty(&adapter->rwi_list))
2111 adapter->state = VNIC_CLOSED;
2112 else
2113 adapter->state = reset_state;
2114 rc = 0;
2115 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
2116 !adapter->force_reset_recovery)
2117 break;
2118
2119 rwi = get_next_rwi(adapter);
2120
2121 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2122 rwi->reset_reason == VNIC_RESET_MOBILITY))
2123 adapter->force_reset_recovery = true;
2124 }
2125
2126 if (adapter->wait_for_reset) {
2127 adapter->reset_done_rc = rc;
2128 complete(&adapter->reset_done);
2129 }
2130
2131 if (rc) {
2132 netdev_dbg(adapter->netdev, "Reset failed\n");
2133 free_all_rwi(adapter);
2134 }
2135
2136 clear_bit_unlock(0, &adapter->resetting);
2137}
2138
2139static void __ibmvnic_delayed_reset(struct work_struct *work)
2140{
2141 struct ibmvnic_adapter *adapter;
2142
2143 adapter = container_of(work, struct ibmvnic_adapter,
2144 ibmvnic_delayed_reset.work);
2145 __ibmvnic_reset(&adapter->ibmvnic_reset);
2146}
2147
2148static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2149 enum ibmvnic_reset_reason reason)
2150{
2151 struct list_head *entry, *tmp_entry;
2152 struct ibmvnic_rwi *rwi, *tmp;
2153 struct net_device *netdev = adapter->netdev;
2154 unsigned long flags;
2155 int ret;
2156
2157 if (adapter->state == VNIC_REMOVING ||
2158 adapter->state == VNIC_REMOVED ||
2159 adapter->failover_pending) {
2160 ret = EBUSY;
2161 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2162 goto err;
2163 }
2164
2165 if (adapter->state == VNIC_PROBING) {
2166 netdev_warn(netdev, "Adapter reset during probe\n");
2167 ret = adapter->init_done_rc = EAGAIN;
2168 goto err;
2169 }
2170
2171 spin_lock_irqsave(&adapter->rwi_lock, flags);
2172
2173 list_for_each(entry, &adapter->rwi_list) {
2174 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2175 if (tmp->reset_reason == reason) {
2176 netdev_dbg(netdev, "Skipping matching reset\n");
2177 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2178 ret = EBUSY;
2179 goto err;
2180 }
2181 }
2182
2183 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2184 if (!rwi) {
2185 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2186 ibmvnic_close(netdev);
2187 ret = ENOMEM;
2188 goto err;
2189 }
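
	/* A hard reset is already pending; any resets queued behind it are
	 * redundant, so flush them before queueing this request.
	 */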
2193 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2194 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2195 list_del(entry);
2196 }
2197 rwi->reset_reason = reason;
2198 list_add_tail(&rwi->list, &adapter->rwi_list);
2199 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2200 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2201 schedule_work(&adapter->ibmvnic_reset);
2202
2203 return 0;
2204err:
2205 return -ret;
2206}
2207
2208static void ibmvnic_tx_timeout(struct net_device *dev)
2209{
2210 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2211
2212 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2213}
2214
2215static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2216 struct ibmvnic_rx_buff *rx_buff)
2217{
2218 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2219
2220 rx_buff->skb = NULL;
2221
2222 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2223 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2224
2225 atomic_dec(&pool->available);
2226}
2227
2228static int ibmvnic_poll(struct napi_struct *napi, int budget)
2229{
2230 struct net_device *netdev = napi->dev;
2231 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2232 int scrq_num = (int)(napi - adapter->napi);
2233 int frames_processed = 0;
2234
2235restart_poll:
2236 while (frames_processed < budget) {
2237 struct sk_buff *skb;
2238 struct ibmvnic_rx_buff *rx_buff;
2239 union sub_crq *next;
2240 u32 length;
2241 u16 offset;
2242 u8 flags = 0;
2243
2244 if (unlikely(test_bit(0, &adapter->resetting) &&
2245 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2246 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2247 napi_complete_done(napi, frames_processed);
2248 return frames_processed;
2249 }
2250
2251 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2252 break;
2253 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2254 rx_buff =
2255 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2256 rx_comp.correlator);
2257
2258 if (next->rx_comp.rc) {
2259 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2260 be16_to_cpu(next->rx_comp.rc));
2261
2262 next->rx_comp.first = 0;
2263 dev_kfree_skb_any(rx_buff->skb);
2264 remove_buff_from_pool(adapter, rx_buff);
2265 continue;
2266 } else if (!rx_buff->skb) {
2267
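			/* free the entry */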
2268 next->rx_comp.first = 0;
2269 remove_buff_from_pool(adapter, rx_buff);
2270 continue;
2271 }
2272
2273 length = be32_to_cpu(next->rx_comp.len);
2274 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2275 flags = next->rx_comp.flags;
2276 skb = rx_buff->skb;
2277 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2278 length);
2279
2280
2281
2282
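		/* VLAN header has been stripped by the system firmware and
		 * needs to be inserted back by the driver.
		 */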
2283 if (adapter->rx_vlan_header_insertion &&
2284 (flags & IBMVNIC_VLAN_STRIPPED))
2285 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2286 ntohs(next->rx_comp.vlan_tci));
2287
2288
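		/* free the entry */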
2289 next->rx_comp.first = 0;
2290 remove_buff_from_pool(adapter, rx_buff);
2291
2292 skb_put(skb, length);
2293 skb->protocol = eth_type_trans(skb, netdev);
2294 skb_record_rx_queue(skb, scrq_num);
2295
2296 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2297 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2298 skb->ip_summed = CHECKSUM_UNNECESSARY;
2299 }
2300
2301 length = skb->len;
2302 napi_gro_receive(napi, skb);
2303 netdev->stats.rx_packets++;
2304 netdev->stats.rx_bytes += length;
2305 adapter->rx_stats_buffers[scrq_num].packets++;
2306 adapter->rx_stats_buffers[scrq_num].bytes += length;
2307 frames_processed++;
2308 }
2309
2310 if (adapter->state != VNIC_CLOSING)
2311 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2312
2313 if (frames_processed < budget) {
2314 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2315 napi_complete_done(napi, frames_processed);
2316 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2317 napi_reschedule(napi)) {
2318 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2319 goto restart_poll;
2320 }
2321 }
2322 return frames_processed;
2323}
2324
2325#ifdef CONFIG_NET_POLL_CONTROLLER
2326static void ibmvnic_netpoll_controller(struct net_device *dev)
2327{
2328 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2329 int i;
2330
2331 replenish_pools(netdev_priv(dev));
2332 for (i = 0; i < adapter->req_rx_queues; i++)
2333 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
2334 adapter->rx_scrq[i]);
2335}
2336#endif
2337
2338static int wait_for_reset(struct ibmvnic_adapter *adapter)
2339{
2340 int rc, ret;
2341
2342 adapter->fallback.mtu = adapter->req_mtu;
2343 adapter->fallback.rx_queues = adapter->req_rx_queues;
2344 adapter->fallback.tx_queues = adapter->req_tx_queues;
2345 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2346 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2347
2348 init_completion(&adapter->reset_done);
2349 adapter->wait_for_reset = true;
2350 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2351 if (rc)
2352 return rc;
2353 wait_for_completion(&adapter->reset_done);
2354
2355 ret = 0;
2356 if (adapter->reset_done_rc) {
2357 ret = -EIO;
2358 adapter->desired.mtu = adapter->fallback.mtu;
2359 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2360 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2361 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2362 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2363
2364 init_completion(&adapter->reset_done);
2365 adapter->wait_for_reset = true;
2366 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2367 if (rc)
2368 return ret;
2369 wait_for_completion(&adapter->reset_done);
2370 }
2371 adapter->wait_for_reset = false;
2372
2373 return ret;
2374}
2375
2376static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2377{
2378 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2379
2380 adapter->desired.mtu = new_mtu + ETH_HLEN;
2381
2382 return wait_for_reset(adapter);
2383}
2384
2385static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2386 struct net_device *dev,
2387 netdev_features_t features)
2388{
2389
2390
2391
2392
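	/* Some backing hardware adapters can not handle packets with an
	 * MSS less than 224 or with only one segment.
	 */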
2393 if (skb_is_gso(skb)) {
2394 if (skb_shinfo(skb)->gso_size < 224 ||
2395 skb_shinfo(skb)->gso_segs == 1)
2396 features &= ~NETIF_F_GSO_MASK;
2397 }
2398
2399 return features;
2400}
2401
2402static const struct net_device_ops ibmvnic_netdev_ops = {
2403 .ndo_open = ibmvnic_open,
2404 .ndo_stop = ibmvnic_close,
2405 .ndo_start_xmit = ibmvnic_xmit,
2406 .ndo_set_rx_mode = ibmvnic_set_multi,
2407 .ndo_set_mac_address = ibmvnic_set_mac,
2408 .ndo_validate_addr = eth_validate_addr,
2409 .ndo_tx_timeout = ibmvnic_tx_timeout,
2410#ifdef CONFIG_NET_POLL_CONTROLLER
2411 .ndo_poll_controller = ibmvnic_netpoll_controller,
2412#endif
2413 .ndo_change_mtu_rh74 = ibmvnic_change_mtu,
2414 .ndo_features_check = ibmvnic_features_check,
2415};
2416
2417
2418
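/* ethtool functions */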
2419static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2420 struct ethtool_link_ksettings *cmd)
2421{
2422 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2423 int rc;
2424
2425 rc = send_query_phys_parms(adapter);
2426 if (rc) {
2427 adapter->speed = SPEED_UNKNOWN;
2428 adapter->duplex = DUPLEX_UNKNOWN;
2429 }
2430 cmd->base.speed = adapter->speed;
2431 cmd->base.duplex = adapter->duplex;
2432 cmd->base.port = PORT_FIBRE;
2433 cmd->base.phy_address = 0;
2434 cmd->base.autoneg = AUTONEG_ENABLE;
2435
2436 return 0;
2437}
2438
2439static void ibmvnic_get_drvinfo(struct net_device *netdev,
2440 struct ethtool_drvinfo *info)
2441{
2442 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2443
2444 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2445 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2446 strlcpy(info->fw_version, adapter->fw_version,
2447 sizeof(info->fw_version));
2448}
2449
2450static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2451{
2452 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2453
2454 return adapter->msg_enable;
2455}
2456
2457static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2458{
2459 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2460
2461 adapter->msg_enable = data;
2462}
2463
2464static u32 ibmvnic_get_link(struct net_device *netdev)
2465{
2466 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2467
2468
2469
2470
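	/* No query needed: a logical link up is requested at init and the
	 * driver then waits for link state indications.
	 */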
2471 return adapter->logical_link_state;
2472}
2473
2474static void ibmvnic_get_ringparam(struct net_device *netdev,
2475 struct ethtool_ringparam *ring)
2476{
2477 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2478
2479 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2480 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2481 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2482 } else {
2483 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2484 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2485 }
2486 ring->rx_mini_max_pending = 0;
2487 ring->rx_jumbo_max_pending = 0;
2488 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2489 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2490 ring->rx_mini_pending = 0;
2491 ring->rx_jumbo_pending = 0;
2492}
2493
2494static int ibmvnic_set_ringparam(struct net_device *netdev,
2495 struct ethtool_ringparam *ring)
2496{
2497 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2498 int ret;
2499
2500 ret = 0;
2501 adapter->desired.rx_entries = ring->rx_pending;
2502 adapter->desired.tx_entries = ring->tx_pending;
2503
2504 ret = wait_for_reset(adapter);
2505
2506 if (!ret &&
2507 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2508 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2509 netdev_info(netdev,
2510 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2511 ring->rx_pending, ring->tx_pending,
2512 adapter->req_rx_add_entries_per_subcrq,
2513 adapter->req_tx_entries_per_subcrq);
2514 return ret;
2515}
2516
2517static void ibmvnic_get_channels(struct net_device *netdev,
2518 struct ethtool_channels *channels)
2519{
2520 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2521
2522 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2523 channels->max_rx = adapter->max_rx_queues;
2524 channels->max_tx = adapter->max_tx_queues;
2525 } else {
2526 channels->max_rx = IBMVNIC_MAX_QUEUES;
2527 channels->max_tx = IBMVNIC_MAX_QUEUES;
2528 }
2529
2530 channels->max_other = 0;
2531 channels->max_combined = 0;
2532 channels->rx_count = adapter->req_rx_queues;
2533 channels->tx_count = adapter->req_tx_queues;
2534 channels->other_count = 0;
2535 channels->combined_count = 0;
2536}
2537
2538static int ibmvnic_set_channels(struct net_device *netdev,
2539 struct ethtool_channels *channels)
2540{
2541 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2542 int ret;
2543
2544 ret = 0;
2545 adapter->desired.rx_queues = channels->rx_count;
2546 adapter->desired.tx_queues = channels->tx_count;
2547
2548 ret = wait_for_reset(adapter);
2549
2550 if (!ret &&
2551 (adapter->req_rx_queues != channels->rx_count ||
2552 adapter->req_tx_queues != channels->tx_count))
2553 netdev_info(netdev,
2554 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2555 channels->rx_count, channels->tx_count,
2556 adapter->req_rx_queues, adapter->req_tx_queues);
2557 return ret;
2559}
2560
2561static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2562{
2563 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2564 int i;
2565
2566 switch (stringset) {
2567 case ETH_SS_STATS:
2568 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2569 i++, data += ETH_GSTRING_LEN)
2570 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2571
2572 for (i = 0; i < adapter->req_tx_queues; i++) {
2573 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2574 data += ETH_GSTRING_LEN;
2575
2576 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2577 data += ETH_GSTRING_LEN;
2578
2579 snprintf(data, ETH_GSTRING_LEN,
2580 "tx%d_dropped_packets", i);
2581 data += ETH_GSTRING_LEN;
2582 }
2583
2584 for (i = 0; i < adapter->req_rx_queues; i++) {
2585 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2586 data += ETH_GSTRING_LEN;
2587
2588 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2589 data += ETH_GSTRING_LEN;
2590
2591 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2592 data += ETH_GSTRING_LEN;
2593 }
2594 break;
2595
2596 case ETH_SS_PRIV_FLAGS:
2597 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2598 strcpy(data + i * ETH_GSTRING_LEN,
2599 ibmvnic_priv_flags[i]);
2600 break;
2601 default:
2602 return;
2603 }
2604}
2605
2606static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2607{
2608 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2609
2610 switch (sset) {
2611 case ETH_SS_STATS:
2612 return ARRAY_SIZE(ibmvnic_stats) +
2613 adapter->req_tx_queues * NUM_TX_STATS +
2614 adapter->req_rx_queues * NUM_RX_STATS;
2615 case ETH_SS_PRIV_FLAGS:
2616 return ARRAY_SIZE(ibmvnic_priv_flags);
2617 default:
2618 return -EOPNOTSUPP;
2619 }
2620}
2621
2622static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2623 struct ethtool_stats *stats, u64 *data)
2624{
2625 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2626 union ibmvnic_crq crq;
2627 int i, j;
2628 int rc;
2629
2630 memset(&crq, 0, sizeof(crq));
2631 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2632 crq.request_statistics.cmd = REQUEST_STATISTICS;
2633 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2634 crq.request_statistics.len =
2635 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2636
2637
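	/* Wait for data to be written */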
2638 init_completion(&adapter->stats_done);
2639 rc = ibmvnic_send_crq(adapter, &crq);
2640 if (rc)
2641 return;
2642 wait_for_completion(&adapter->stats_done);
2643
2644 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2645 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2646 ibmvnic_stats[i].offset));
2647
2648 for (j = 0; j < adapter->req_tx_queues; j++) {
2649 data[i] = adapter->tx_stats_buffers[j].packets;
2650 i++;
2651 data[i] = adapter->tx_stats_buffers[j].bytes;
2652 i++;
2653 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2654 i++;
2655 }
2656
2657 for (j = 0; j < adapter->req_rx_queues; j++) {
2658 data[i] = adapter->rx_stats_buffers[j].packets;
2659 i++;
2660 data[i] = adapter->rx_stats_buffers[j].bytes;
2661 i++;
2662 data[i] = adapter->rx_stats_buffers[j].interrupts;
2663 i++;
2664 }
2665}
2666
2667static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2668{
2669 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2670
2671 return adapter->priv_flags;
2672}
2673
2674static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2675{
2676 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2677 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2678
2679 if (which_maxes)
2680 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2681 else
2682 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2683
2684 return 0;
2685}

2686static const struct ethtool_ops ibmvnic_ethtool_ops = {
2687 .get_drvinfo = ibmvnic_get_drvinfo,
2688 .get_msglevel = ibmvnic_get_msglevel,
2689 .set_msglevel = ibmvnic_set_msglevel,
2690 .get_link = ibmvnic_get_link,
2691 .get_ringparam = ibmvnic_get_ringparam,
2692 .set_ringparam = ibmvnic_set_ringparam,
2693 .get_channels = ibmvnic_get_channels,
2694 .set_channels = ibmvnic_set_channels,
2695 .get_strings = ibmvnic_get_strings,
2696 .get_sset_count = ibmvnic_get_sset_count,
2697 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2698 .get_link_ksettings = ibmvnic_get_link_ksettings,
2699 .get_priv_flags = ibmvnic_get_priv_flags,
2700 .set_priv_flags = ibmvnic_set_priv_flags,
2701};
2702
2703
2704
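/* Routines for managing CRQs/sCRQs */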
2705static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2706 struct ibmvnic_sub_crq_queue *scrq)
2707{
2708 int rc;
2709
2710 if (scrq->irq) {
2711 free_irq(scrq->irq, scrq);
2712 irq_dispose_mapping(scrq->irq);
2713 scrq->irq = 0;
2714 }
2715
2716 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2717 atomic_set(&scrq->used, 0);
2718 scrq->cur = 0;
2719
2720 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2721 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2722 return rc;
2723}
2724
2725static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2726{
2727 int i, rc;
2728
2729 for (i = 0; i < adapter->req_tx_queues; i++) {
2730 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2731 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2732 if (rc)
2733 return rc;
2734 }
2735
2736 for (i = 0; i < adapter->req_rx_queues; i++) {
2737 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2738 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2739 if (rc)
2740 return rc;
2741 }
2742
2743 return rc;
2744}
2745
2746static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2747 struct ibmvnic_sub_crq_queue *scrq,
2748 bool do_h_free)
2749{
2750 struct device *dev = &adapter->vdev->dev;
2751 long rc;
2752
2753 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2754
2755 if (do_h_free) {
2756
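		/* Close the sub-CRQ */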
2757 do {
2758 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2759 adapter->vdev->unit_address,
2760 scrq->crq_num);
2761 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2762
2763 if (rc) {
2764 netdev_err(adapter->netdev,
2765 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2766 scrq->crq_num, rc);
2767 }
2768 }
2769
2770 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2771 DMA_BIDIRECTIONAL);
2772 free_pages((unsigned long)scrq->msgs, 2);
2773 kfree(scrq);
2774}
2775
2776static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2777 *adapter)
2778{
2779 struct device *dev = &adapter->vdev->dev;
2780 struct ibmvnic_sub_crq_queue *scrq;
2781 int rc;
2782
2783 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2784 if (!scrq)
2785 return NULL;
2786
2787 scrq->msgs =
2788 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2789 if (!scrq->msgs) {
2790 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2791 goto zero_page_failed;
2792 }
2793
2794 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2795 DMA_BIDIRECTIONAL);
2796 if (dma_mapping_error(dev, scrq->msg_token)) {
2797 dev_warn(dev, "Couldn't map crq queue messages page\n");
2798 goto map_failed;
2799 }
2800
2801 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2802 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2803
2804 if (rc == H_RESOURCE)
2805 rc = ibmvnic_reset_crq(adapter);
2806
2807 if (rc == H_CLOSED) {
2808 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2809 } else if (rc) {
2810 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2811 goto reg_failed;
2812 }
2813
2814 scrq->adapter = adapter;
2815 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2816 spin_lock_init(&scrq->lock);
2817
2818 netdev_dbg(adapter->netdev,
2819 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2820 scrq->crq_num, scrq->hw_irq, scrq->irq);
2821
2822 return scrq;
2823
2824reg_failed:
2825 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2826 DMA_BIDIRECTIONAL);
2827map_failed:
2828 free_pages((unsigned long)scrq->msgs, 2);
2829zero_page_failed:
2830 kfree(scrq);
2831
2832 return NULL;
2833}
2834
2835static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2836{
2837 int i;
2838
2839 if (adapter->tx_scrq) {
2840 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2841 if (!adapter->tx_scrq[i])
2842 continue;
2843
2844 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2845 i);
2846 if (adapter->tx_scrq[i]->irq) {
2847 free_irq(adapter->tx_scrq[i]->irq,
2848 adapter->tx_scrq[i]);
2849 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2850 adapter->tx_scrq[i]->irq = 0;
2851 }
2852
2853 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2854 do_h_free);
2855 }
2856
2857 kfree(adapter->tx_scrq);
2858 adapter->tx_scrq = NULL;
2859 adapter->num_active_tx_scrqs = 0;
2860 }
2861
2862 if (adapter->rx_scrq) {
2863 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2864 if (!adapter->rx_scrq[i])
2865 continue;
2866
2867 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2868 i);
2869 if (adapter->rx_scrq[i]->irq) {
2870 free_irq(adapter->rx_scrq[i]->irq,
2871 adapter->rx_scrq[i]);
2872 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2873 adapter->rx_scrq[i]->irq = 0;
2874 }
2875
2876 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2877 do_h_free);
2878 }
2879
2880 kfree(adapter->rx_scrq);
2881 adapter->rx_scrq = NULL;
2882 adapter->num_active_rx_scrqs = 0;
2883 }
2884}
2885
2886static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2887 struct ibmvnic_sub_crq_queue *scrq)
2888{
2889 struct device *dev = &adapter->vdev->dev;
2890 unsigned long rc;
2891
2892 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2893 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2894 if (rc)
2895 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2896 scrq->hw_irq, rc);
2897 return rc;
2898}
2899
2900static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2901 struct ibmvnic_sub_crq_queue *scrq)
2902{
2903 struct device *dev = &adapter->vdev->dev;
2904 unsigned long rc;
2905
2906 if (scrq->hw_irq > 0x100000000ULL) {
2907 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2908 return 1;
2909 }
2910
2911 if (test_bit(0, &adapter->resetting) &&
2912 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2913 u64 val = (0xff000000) | scrq->hw_irq;
2914
2915 rc = plpar_hcall_norets(H_EOI, val);
2916 if (rc)
2917 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2918 val, rc);
2919 }
2920
2921 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2922 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2923 if (rc)
2924 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2925 scrq->hw_irq, rc);
2926 return rc;
2927}
2928
2929static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2930 struct ibmvnic_sub_crq_queue *scrq)
2931{
2932 struct device *dev = &adapter->vdev->dev;
2933 struct ibmvnic_tx_pool *tx_pool;
2934 struct ibmvnic_tx_buff *txbuff;
2935 union sub_crq *next;
2936 int index;
2937 int i, j;
2938
2939restart_loop:
2940 while (pending_scrq(adapter, scrq)) {
2941 unsigned int pool = scrq->pool_index;
2942 int num_entries = 0;
2943
2944 next = ibmvnic_next_scrq(adapter, scrq);
2945 for (i = 0; i < next->tx_comp.num_comps; i++) {
2946 if (next->tx_comp.rcs[i]) {
2947 dev_err(dev, "tx error %x\n",
2948 next->tx_comp.rcs[i]);
2949 continue;
2950 }
2951 index = be32_to_cpu(next->tx_comp.correlators[i]);
2952 if (index & IBMVNIC_TSO_POOL_MASK) {
2953 tx_pool = &adapter->tso_pool[pool];
2954 index &= ~IBMVNIC_TSO_POOL_MASK;
2955 } else {
2956 tx_pool = &adapter->tx_pool[pool];
2957 }
2958
2959 txbuff = &tx_pool->tx_buff[index];
2960
2961 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2962 if (!txbuff->data_dma[j])
2963 continue;
2964
2965 txbuff->data_dma[j] = 0;
2966 }
2967
2968 if (txbuff->last_frag) {
2969 dev_kfree_skb_any(txbuff->skb);
2970 txbuff->skb = NULL;
2971 }
2972
2973 num_entries += txbuff->num_entries;
2974
2975 tx_pool->free_map[tx_pool->producer_index] = index;
2976 tx_pool->producer_index =
2977 (tx_pool->producer_index + 1) %
2978 tx_pool->num_buffers;
2979 }
2980
2981 next->tx_comp.first = 0;
2982
2983 if (atomic_sub_return(num_entries, &scrq->used) <=
2984 (adapter->req_tx_entries_per_subcrq / 2) &&
2985 __netif_subqueue_stopped(adapter->netdev,
2986 scrq->pool_index)) {
2987 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2988 netdev_dbg(adapter->netdev, "Started queue %d\n",
2989 scrq->pool_index);
2990 }
2991 }
2992
2993 enable_scrq_irq(adapter, scrq);
2994
2995 if (pending_scrq(adapter, scrq)) {
2996 disable_scrq_irq(adapter, scrq);
2997 goto restart_loop;
2998 }
2999
3000 return 0;
3001}
3002
3003static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3004{
3005 struct ibmvnic_sub_crq_queue *scrq = instance;
3006 struct ibmvnic_adapter *adapter = scrq->adapter;
3007
3008 disable_scrq_irq(adapter, scrq);
3009 ibmvnic_complete_tx(adapter, scrq);
3010
3011 return IRQ_HANDLED;
3012}
3013
3014static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3015{
3016 struct ibmvnic_sub_crq_queue *scrq = instance;
3017 struct ibmvnic_adapter *adapter = scrq->adapter;
3018
3019
3020
3021
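	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */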
3022 if (unlikely(adapter->state != VNIC_OPEN))
3023 return IRQ_NONE;
3024
3025 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3026
3027 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3028 disable_scrq_irq(adapter, scrq);
3029 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3030 }
3031
3032 return IRQ_HANDLED;
3033}
3034
3035static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3036{
3037 struct device *dev = &adapter->vdev->dev;
3038 struct ibmvnic_sub_crq_queue *scrq;
3039 int i = 0, j = 0;
3040 int rc = 0;
3041
3042 for (i = 0; i < adapter->req_tx_queues; i++) {
3043 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3044 i);
3045 scrq = adapter->tx_scrq[i];
3046 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3047
3048 if (scrq->irq == NO_IRQ) {
3049 rc = -EINVAL;
3050 dev_err(dev, "Error mapping irq\n");
3051 goto req_tx_irq_failed;
3052 }
3053
3054 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3055 0, "ibmvnic_tx", scrq);
3056
3057 if (rc) {
3058 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3059 scrq->irq, rc);
3060 irq_dispose_mapping(scrq->irq);
3061 goto req_tx_irq_failed;
3062 }
3063 }
3064
3065 for (i = 0; i < adapter->req_rx_queues; i++) {
3066 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3067 i);
3068 scrq = adapter->rx_scrq[i];
3069 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3070 if (scrq->irq == NO_IRQ) {
3071 rc = -EINVAL;
3072 dev_err(dev, "Error mapping irq\n");
3073 goto req_rx_irq_failed;
3074 }
3075 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3076 0, "ibmvnic_rx", scrq);
3077 if (rc) {
3078 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3079 scrq->irq, rc);
3080 irq_dispose_mapping(scrq->irq);
3081 goto req_rx_irq_failed;
3082 }
3083 }
3084 return rc;
3085
3086req_rx_irq_failed:
3087 for (j = 0; j < i; j++) {
3088 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3089 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3090 }
3091 i = adapter->req_tx_queues;
3092req_tx_irq_failed:
3093 for (j = 0; j < i; j++) {
3094 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3095		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3096 }
3097 release_sub_crqs(adapter, 1);
3098 return rc;
3099}
3100
3101static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3102{
3103 struct device *dev = &adapter->vdev->dev;
3104 struct ibmvnic_sub_crq_queue **allqueues;
3105 int registered_queues = 0;
3106 int total_queues;
3107 int more = 0;
3108 int i;
3109
3110 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3111
3112 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3113 if (!allqueues)
3114 return -1;
3115
3116 for (i = 0; i < total_queues; i++) {
3117 allqueues[i] = init_sub_crq_queue(adapter);
3118 if (!allqueues[i]) {
3119 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3120 break;
3121 }
3122 registered_queues++;
3123 }
3124
3125
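	/* Make sure we were able to register the minimum number of queues */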
3126 if (registered_queues <
3127 adapter->min_tx_queues + adapter->min_rx_queues) {
3128 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3129 goto tx_failed;
3130 }
3131
3132
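	/* Distribute the failed allocated queues */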
3133	for (i = 0; i < total_queues - registered_queues + more; i++) {
3134 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3135 switch (i % 3) {
3136 case 0:
3137 if (adapter->req_rx_queues > adapter->min_rx_queues)
3138 adapter->req_rx_queues--;
3139 else
3140 more++;
3141 break;
3142 case 1:
3143 if (adapter->req_tx_queues > adapter->min_tx_queues)
3144 adapter->req_tx_queues--;
3145 else
3146 more++;
3147 break;
3148 }
3149 }
3150
3151 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3152 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3153 if (!adapter->tx_scrq)
3154 goto tx_failed;
3155
3156 for (i = 0; i < adapter->req_tx_queues; i++) {
3157 adapter->tx_scrq[i] = allqueues[i];
3158 adapter->tx_scrq[i]->pool_index = i;
3159 adapter->num_active_tx_scrqs++;
3160 }
3161
3162 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3163 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3164 if (!adapter->rx_scrq)
3165 goto rx_failed;
3166
3167 for (i = 0; i < adapter->req_rx_queues; i++) {
3168 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3169 adapter->rx_scrq[i]->scrq_num = i;
3170 adapter->num_active_rx_scrqs++;
3171 }
3172
3173 kfree(allqueues);
3174 return 0;
3175
3176rx_failed:
3177 kfree(adapter->tx_scrq);
3178 adapter->tx_scrq = NULL;
3179tx_failed:
3180 for (i = 0; i < registered_queues; i++)
3181 release_sub_crq_queue(adapter, allqueues[i], 1);
3182 kfree(allqueues);
3183 return -1;
3184}
3185
3186static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3187{
3188 struct device *dev = &adapter->vdev->dev;
3189 union ibmvnic_crq crq;
3190 int max_entries;
3191
3192 if (!retry) {
3193
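		/* Sub-CRQ entries are 32 bytes long */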
3194 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3195
3196 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3197 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3198 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3199 return;
3200 }
3201
3202 if (adapter->desired.mtu)
3203 adapter->req_mtu = adapter->desired.mtu;
3204 else
3205 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3206
3207 if (!adapter->desired.tx_entries)
3208 adapter->desired.tx_entries =
3209 adapter->max_tx_entries_per_subcrq;
3210 if (!adapter->desired.rx_entries)
3211 adapter->desired.rx_entries =
3212 adapter->max_rx_add_entries_per_subcrq;
3213
3214 max_entries = IBMVNIC_MAX_LTB_SIZE /
3215 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3216
3217 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3218 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3219 adapter->desired.tx_entries = max_entries;
3220 }
3221
3222 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3223 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3224 adapter->desired.rx_entries = max_entries;
3225 }
3226
3227 if (adapter->desired.tx_entries)
3228 adapter->req_tx_entries_per_subcrq =
3229 adapter->desired.tx_entries;
3230 else
3231 adapter->req_tx_entries_per_subcrq =
3232 adapter->max_tx_entries_per_subcrq;
3233
3234 if (adapter->desired.rx_entries)
3235 adapter->req_rx_add_entries_per_subcrq =
3236 adapter->desired.rx_entries;
3237 else
3238 adapter->req_rx_add_entries_per_subcrq =
3239 adapter->max_rx_add_entries_per_subcrq;
3240
3241 if (adapter->desired.tx_queues)
3242 adapter->req_tx_queues =
3243 adapter->desired.tx_queues;
3244 else
3245 adapter->req_tx_queues =
3246 adapter->opt_tx_comp_sub_queues;
3247
3248 if (adapter->desired.rx_queues)
3249 adapter->req_rx_queues =
3250 adapter->desired.rx_queues;
3251 else
3252 adapter->req_rx_queues =
3253 adapter->opt_rx_comp_queues;
3254
3255 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3256 }
3257
3258 memset(&crq, 0, sizeof(crq));
3259 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3260 crq.request_capability.cmd = REQUEST_CAPABILITY;
3261
3262 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3263 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3264 atomic_inc(&adapter->running_cap_crqs);
3265 ibmvnic_send_crq(adapter, &crq);
3266
3267 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3268 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3269 atomic_inc(&adapter->running_cap_crqs);
3270 ibmvnic_send_crq(adapter, &crq);
3271
3272 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3273 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3274 atomic_inc(&adapter->running_cap_crqs);
3275 ibmvnic_send_crq(adapter, &crq);
3276
3277 crq.request_capability.capability =
3278 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3279 crq.request_capability.number =
3280 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3281 atomic_inc(&adapter->running_cap_crqs);
3282 ibmvnic_send_crq(adapter, &crq);
3283
3284 crq.request_capability.capability =
3285 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3286 crq.request_capability.number =
3287 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3288 atomic_inc(&adapter->running_cap_crqs);
3289 ibmvnic_send_crq(adapter, &crq);
3290
3291 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3292 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3293 atomic_inc(&adapter->running_cap_crqs);
3294 ibmvnic_send_crq(adapter, &crq);
3295
3296 if (adapter->netdev->flags & IFF_PROMISC) {
3297 if (adapter->promisc_supported) {
3298 crq.request_capability.capability =
3299 cpu_to_be16(PROMISC_REQUESTED);
3300 crq.request_capability.number = cpu_to_be64(1);
3301 atomic_inc(&adapter->running_cap_crqs);
3302 ibmvnic_send_crq(adapter, &crq);
3303 }
3304 } else {
3305 crq.request_capability.capability =
3306 cpu_to_be16(PROMISC_REQUESTED);
3307 crq.request_capability.number = cpu_to_be64(0);
3308 atomic_inc(&adapter->running_cap_crqs);
3309 ibmvnic_send_crq(adapter, &crq);
3310 }
3311}
3312
3313static int pending_scrq(struct ibmvnic_adapter *adapter,
3314 struct ibmvnic_sub_crq_queue *scrq)
3315{
3316 union sub_crq *entry = &scrq->msgs[scrq->cur];
3317
3318 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3319 return 1;
3320 else
3321 return 0;
3322}
3323
3324static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3325 struct ibmvnic_sub_crq_queue *scrq)
3326{
3327 union sub_crq *entry;
3328 unsigned long flags;
3329
3330 spin_lock_irqsave(&scrq->lock, flags);
3331 entry = &scrq->msgs[scrq->cur];
3332 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3333 if (++scrq->cur == scrq->size)
3334 scrq->cur = 0;
3335 } else {
3336 entry = NULL;
3337 }
3338 spin_unlock_irqrestore(&scrq->lock, flags);
3339
3340 return entry;
3341}
3342
3343static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3344{
3345 struct ibmvnic_crq_queue *queue = &adapter->crq;
3346 union ibmvnic_crq *crq;
3347
3348 crq = &queue->msgs[queue->cur];
3349 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3350 if (++queue->cur == queue->size)
3351 queue->cur = 0;
3352 } else {
3353 crq = NULL;
3354 }
3355
3356 return crq;
3357}
3358
3359static void print_subcrq_error(struct device *dev, int rc, const char *func)
3360{
3361 switch (rc) {
3362 case H_PARAMETER:
3363 dev_warn_ratelimited(dev,
3364 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3365 func, rc);
3366 break;
3367 case H_CLOSED:
3368 dev_warn_ratelimited(dev,
3369 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3370 func, rc);
3371 break;
3372 default:
3373 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3374 break;
3375 }
3376}
3377
3378static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3379 union sub_crq *sub_crq)
3380{
3381 unsigned int ua = adapter->vdev->unit_address;
3382 struct device *dev = &adapter->vdev->dev;
3383 u64 *u64_crq = (u64 *)sub_crq;
3384 int rc;
3385
3386 netdev_dbg(adapter->netdev,
3387 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3388 (unsigned long int)cpu_to_be64(remote_handle),
3389 (unsigned long int)cpu_to_be64(u64_crq[0]),
3390 (unsigned long int)cpu_to_be64(u64_crq[1]),
3391 (unsigned long int)cpu_to_be64(u64_crq[2]),
3392 (unsigned long int)cpu_to_be64(u64_crq[3]));
3393
3394
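	/* Make sure the hypervisor sees the complete request */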
3395 mb();
3396
3397 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3398 cpu_to_be64(remote_handle),
3399 cpu_to_be64(u64_crq[0]),
3400 cpu_to_be64(u64_crq[1]),
3401 cpu_to_be64(u64_crq[2]),
3402 cpu_to_be64(u64_crq[3]));
3403
3404 if (rc)
3405 print_subcrq_error(dev, rc, __func__);
3406
3407 return rc;
3408}
3409
3410static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3411 u64 remote_handle, u64 ioba, u64 num_entries)
3412{
3413 unsigned int ua = adapter->vdev->unit_address;
3414 struct device *dev = &adapter->vdev->dev;
3415 int rc;
3416
3417
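	/* Make sure the hypervisor sees the complete request */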
3418 mb();
3419 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3420 cpu_to_be64(remote_handle),
3421 ioba, num_entries);
3422
3423 if (rc)
3424 print_subcrq_error(dev, rc, __func__);
3425
3426 return rc;
3427}
3428
3429static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3430 union ibmvnic_crq *crq)
3431{
3432 unsigned int ua = adapter->vdev->unit_address;
3433 struct device *dev = &adapter->vdev->dev;
3434 u64 *u64_crq = (u64 *)crq;
3435 int rc;
3436
3437 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3438 (unsigned long int)cpu_to_be64(u64_crq[0]),
3439 (unsigned long int)cpu_to_be64(u64_crq[1]));
3440
3441 if (!adapter->crq.active &&
3442 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3443 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3444 return -EINVAL;
3445 }
3446
3447
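	/* Make sure the hypervisor sees the complete request */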
3448 mb();
3449
3450 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3451 cpu_to_be64(u64_crq[0]),
3452 cpu_to_be64(u64_crq[1]));
3453
3454 if (rc) {
3455 if (rc == H_CLOSED) {
3456 dev_warn(dev, "CRQ Queue closed\n");
3457 if (test_bit(0, &adapter->resetting))
3458 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3459 }
3460
3461 dev_warn(dev, "Send error (rc=%d)\n", rc);
3462 }
3463
3464 return rc;
3465}
3466
3467static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3468{
3469 union ibmvnic_crq crq;
3470
3471 memset(&crq, 0, sizeof(crq));
3472 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3473 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3474 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3475
3476 return ibmvnic_send_crq(adapter, &crq);
3477}
3478
3479static int send_version_xchg(struct ibmvnic_adapter *adapter)
3480{
3481 union ibmvnic_crq crq;
3482
3483 memset(&crq, 0, sizeof(crq));
3484 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3485 crq.version_exchange.cmd = VERSION_EXCHANGE;
3486 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3487
3488 return ibmvnic_send_crq(adapter, &crq);
3489}
3490
3491struct vnic_login_client_data {
3492 u8 type;
3493 __be16 len;
3494 char name[];
3495} __packed;
3496
3497static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3498{
3499 int len;
3500
3501
3502
3503
3504
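	/* Calculate the amount of buffer space needed for the vnic client
	 * data in the login buffer. There are four entries: OS name,
	 * LPAR name, device name, and a null last entry.
	 */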
3505 len = 4 * sizeof(struct vnic_login_client_data);
3506 len += 6;
3507 len += strlen(utsname()->nodename) + 1;
3508 len += strlen(adapter->netdev->name) + 1;
3509
3510 return len;
3511}
3512
3513static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3514 struct vnic_login_client_data *vlcd)
3515{
3516 const char *os_name = "Linux";
3517 int len;
3518
3519
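	/* Type 1 - LPAR OS */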
3520 vlcd->type = 1;
3521 len = strlen(os_name) + 1;
3522 vlcd->len = cpu_to_be16(len);
3523 strncpy(vlcd->name, os_name, len);
3524 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3525
3526
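	/* Type 2 - LPAR name */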
3527 vlcd->type = 2;
3528 len = strlen(utsname()->nodename) + 1;
3529 vlcd->len = cpu_to_be16(len);
3530 strncpy(vlcd->name, utsname()->nodename, len);
3531 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3532
3533
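	/* Type 3 - device name */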
3534 vlcd->type = 3;
3535 len = strlen(adapter->netdev->name) + 1;
3536 vlcd->len = cpu_to_be16(len);
3537 strncpy(vlcd->name, adapter->netdev->name, len);
3538}
3539
3540static int send_login(struct ibmvnic_adapter *adapter)
3541{
3542 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3543 struct ibmvnic_login_buffer *login_buffer;
3544 struct device *dev = &adapter->vdev->dev;
3545 dma_addr_t rsp_buffer_token;
3546 dma_addr_t buffer_token;
3547 size_t rsp_buffer_size;
3548 union ibmvnic_crq crq;
3549 size_t buffer_size;
3550 __be64 *tx_list_p;
3551 __be64 *rx_list_p;
3552 int client_data_len;
3553 struct vnic_login_client_data *vlcd;
3554 int i;
3555
3556 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3557 netdev_err(adapter->netdev,
3558 "RX or TX queues are not allocated, device login failed\n");
3559 return -1;
3560 }
3561
3562 release_login_rsp_buffer(adapter);
3563 client_data_len = vnic_client_data_len(adapter);
3564
3565 buffer_size =
3566 sizeof(struct ibmvnic_login_buffer) +
3567 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3568 client_data_len;
3569
3570 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3571 if (!login_buffer)
3572 goto buf_alloc_failed;
3573
3574 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3575 DMA_TO_DEVICE);
3576 if (dma_mapping_error(dev, buffer_token)) {
3577 dev_err(dev, "Couldn't map login buffer\n");
3578 goto buf_map_failed;
3579 }
3580
3581 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3582 sizeof(u64) * adapter->req_tx_queues +
3583 sizeof(u64) * adapter->req_rx_queues +
3584 sizeof(u64) * adapter->req_rx_queues +
3585 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3586
3587 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3588 if (!login_rsp_buffer)
3589 goto buf_rsp_alloc_failed;
3590
3591 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3592 rsp_buffer_size, DMA_FROM_DEVICE);
3593 if (dma_mapping_error(dev, rsp_buffer_token)) {
3594 dev_err(dev, "Couldn't map login rsp buffer\n");
3595 goto buf_rsp_map_failed;
3596 }
3597
3598 adapter->login_buf = login_buffer;
3599 adapter->login_buf_token = buffer_token;
3600 adapter->login_buf_sz = buffer_size;
3601 adapter->login_rsp_buf = login_rsp_buffer;
3602 adapter->login_rsp_buf_token = rsp_buffer_token;
3603 adapter->login_rsp_buf_sz = rsp_buffer_size;
3604
3605 login_buffer->len = cpu_to_be32(buffer_size);
3606 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3607 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3608 login_buffer->off_txcomp_subcrqs =
3609 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3610 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3611 login_buffer->off_rxcomp_subcrqs =
3612 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3613 sizeof(u64) * adapter->req_tx_queues);
3614 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3615 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3616
3617 tx_list_p = (__be64 *)((char *)login_buffer +
3618 sizeof(struct ibmvnic_login_buffer));
3619 rx_list_p = (__be64 *)((char *)login_buffer +
3620 sizeof(struct ibmvnic_login_buffer) +
3621 sizeof(u64) * adapter->req_tx_queues);
3622
3623 for (i = 0; i < adapter->req_tx_queues; i++) {
3624 if (adapter->tx_scrq[i]) {
3625 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3626 crq_num);
3627 }
3628 }
3629
3630 for (i = 0; i < adapter->req_rx_queues; i++) {
3631 if (adapter->rx_scrq[i]) {
3632 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3633 crq_num);
3634 }
3635 }
3636
3637
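	/* Insert vNIC login client data */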
3638 vlcd = (struct vnic_login_client_data *)
3639 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3640 login_buffer->client_data_offset =
3641 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3642 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3643
3644 vnic_add_client_data(adapter, vlcd);
3645
3646 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3647 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3648 netdev_dbg(adapter->netdev, "%016lx\n",
3649 ((unsigned long int *)(adapter->login_buf))[i]);
3650 }
3651
3652 memset(&crq, 0, sizeof(crq));
3653 crq.login.first = IBMVNIC_CRQ_CMD;
3654 crq.login.cmd = LOGIN;
3655 crq.login.ioba = cpu_to_be32(buffer_token);
3656 crq.login.len = cpu_to_be32(buffer_size);
3657 ibmvnic_send_crq(adapter, &crq);
3658
3659 return 0;
3660
3661buf_rsp_map_failed:
3662 kfree(login_rsp_buffer);
3663buf_rsp_alloc_failed:
3664 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3665buf_map_failed:
3666 kfree(login_buffer);
3667buf_alloc_failed:
3668 return -1;
3669}
3670
3671static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3672 u32 len, u8 map_id)
3673{
3674 union ibmvnic_crq crq;
3675
3676 memset(&crq, 0, sizeof(crq));
3677 crq.request_map.first = IBMVNIC_CRQ_CMD;
3678 crq.request_map.cmd = REQUEST_MAP;
3679 crq.request_map.map_id = map_id;
3680 crq.request_map.ioba = cpu_to_be32(addr);
3681 crq.request_map.len = cpu_to_be32(len);
3682 return ibmvnic_send_crq(adapter, &crq);
3683}
3684
3685static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3686{
3687 union ibmvnic_crq crq;
3688
3689 memset(&crq, 0, sizeof(crq));
3690 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3691 crq.request_unmap.cmd = REQUEST_UNMAP;
3692 crq.request_unmap.map_id = map_id;
3693 return ibmvnic_send_crq(adapter, &crq);
3694}
3695
3696static void send_map_query(struct ibmvnic_adapter *adapter)
3697{
3698 union ibmvnic_crq crq;
3699
3700 memset(&crq, 0, sizeof(crq));
3701 crq.query_map.first = IBMVNIC_CRQ_CMD;
3702 crq.query_map.cmd = QUERY_MAP;
3703 ibmvnic_send_crq(adapter, &crq);
3704}
3705
3706
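/* Query the hypervisor for the full set of adapter capabilities */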
3707static void send_cap_queries(struct ibmvnic_adapter *adapter)
3708{
3709 union ibmvnic_crq crq;
3710
3711 atomic_set(&adapter->running_cap_crqs, 0);
3712 memset(&crq, 0, sizeof(crq));
3713 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3714 crq.query_capability.cmd = QUERY_CAPABILITY;
3715
3716 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3717 atomic_inc(&adapter->running_cap_crqs);
3718 ibmvnic_send_crq(adapter, &crq);
3719
3720 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3721 atomic_inc(&adapter->running_cap_crqs);
3722 ibmvnic_send_crq(adapter, &crq);
3723
3724 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3725 atomic_inc(&adapter->running_cap_crqs);
3726 ibmvnic_send_crq(adapter, &crq);
3727
3728 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3729 atomic_inc(&adapter->running_cap_crqs);
3730 ibmvnic_send_crq(adapter, &crq);
3731
3732 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3733 atomic_inc(&adapter->running_cap_crqs);
3734 ibmvnic_send_crq(adapter, &crq);
3735
3736 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3737 atomic_inc(&adapter->running_cap_crqs);
3738 ibmvnic_send_crq(adapter, &crq);
3739
3740 crq.query_capability.capability =
3741 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3742 atomic_inc(&adapter->running_cap_crqs);
3743 ibmvnic_send_crq(adapter, &crq);
3744
3745 crq.query_capability.capability =
3746 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3747 atomic_inc(&adapter->running_cap_crqs);
3748 ibmvnic_send_crq(adapter, &crq);
3749
3750 crq.query_capability.capability =
3751 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3752 atomic_inc(&adapter->running_cap_crqs);
3753 ibmvnic_send_crq(adapter, &crq);
3754
3755 crq.query_capability.capability =
3756 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3757 atomic_inc(&adapter->running_cap_crqs);
3758 ibmvnic_send_crq(adapter, &crq);
3759
3760 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3761 atomic_inc(&adapter->running_cap_crqs);
3762 ibmvnic_send_crq(adapter, &crq);
3763
3764 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3765 atomic_inc(&adapter->running_cap_crqs);
3766 ibmvnic_send_crq(adapter, &crq);
3767
3768 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3769 atomic_inc(&adapter->running_cap_crqs);
3770 ibmvnic_send_crq(adapter, &crq);
3771
3772 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3773 atomic_inc(&adapter->running_cap_crqs);
3774 ibmvnic_send_crq(adapter, &crq);
3775
3776 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3777 atomic_inc(&adapter->running_cap_crqs);
3778 ibmvnic_send_crq(adapter, &crq);
3779
3780 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3781 atomic_inc(&adapter->running_cap_crqs);
3782 ibmvnic_send_crq(adapter, &crq);
3783
3784 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3785 atomic_inc(&adapter->running_cap_crqs);
3786 ibmvnic_send_crq(adapter, &crq);
3787
3788 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3789 atomic_inc(&adapter->running_cap_crqs);
3790 ibmvnic_send_crq(adapter, &crq);
3791
3792 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3793 atomic_inc(&adapter->running_cap_crqs);
3794 ibmvnic_send_crq(adapter, &crq);
3795
3796 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3797 atomic_inc(&adapter->running_cap_crqs);
3798 ibmvnic_send_crq(adapter, &crq);
3799
3800 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3801 atomic_inc(&adapter->running_cap_crqs);
3802 ibmvnic_send_crq(adapter, &crq);
3803
3804 crq.query_capability.capability =
3805 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3806 atomic_inc(&adapter->running_cap_crqs);
3807 ibmvnic_send_crq(adapter, &crq);
3808
3809 crq.query_capability.capability =
3810 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3811 atomic_inc(&adapter->running_cap_crqs);
3812 ibmvnic_send_crq(adapter, &crq);
3813
3814 crq.query_capability.capability =
3815 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3816 atomic_inc(&adapter->running_cap_crqs);
3817 ibmvnic_send_crq(adapter, &crq);
3818
3819 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3820 atomic_inc(&adapter->running_cap_crqs);
3821 ibmvnic_send_crq(adapter, &crq);
3822}
3823
3824static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3825 struct ibmvnic_adapter *adapter)
3826{
3827 struct device *dev = &adapter->vdev->dev;
3828
3829 if (crq->get_vpd_size_rsp.rc.code) {
3830 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3831 crq->get_vpd_size_rsp.rc.code);
3832 complete(&adapter->fw_done);
3833 return;
3834 }
3835
3836 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3837 complete(&adapter->fw_done);
3838}
3839
3840static void handle_vpd_rsp(union ibmvnic_crq *crq,
3841 struct ibmvnic_adapter *adapter)
3842{
3843 struct device *dev = &adapter->vdev->dev;
3844 unsigned char *substr = NULL;
3845 u8 fw_level_len = 0;
3846
3847 memset(adapter->fw_version, 0, 32);
3848
3849 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3850 DMA_FROM_DEVICE);
3851
3852 if (crq->get_vpd_rsp.rc.code) {
3853 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3854 crq->get_vpd_rsp.rc.code);
3855 goto complete;
3856 }
3857
3858
3859
3860
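	/* get the position of the firmware version info
	 * located after the ASCII 'RM' string in the buffer
	 */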
3861 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3862 if (!substr) {
3863 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3864 goto complete;
3865 }
3866
3867
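	/* get length of firmware level ASCII substring */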
3868 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3869 fw_level_len = *(substr + 2);
3870 } else {
3871		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
3872 goto complete;
3873 }
3874
3875
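	/* copy firmware version string from vpd into adapter */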
3876 if ((substr + 3 + fw_level_len) <
3877 (adapter->vpd->buff + adapter->vpd->len)) {
3878 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3879 } else {
3880 dev_info(dev, "FW substr extrapolated VPD buff\n");
3881 }
3882
3883complete:
3884 if (adapter->fw_version[0] == '\0')
3885 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3886 complete(&adapter->fw_done);
3887}
3888
3889static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3890{
3891 struct device *dev = &adapter->vdev->dev;
3892 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3893 netdev_features_t old_hw_features = 0;
3894 union ibmvnic_crq crq;
3895 int i;
3896
3897 dma_unmap_single(dev, adapter->ip_offload_tok,
3898 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3899
3900 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3901 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3902 netdev_dbg(adapter->netdev, "%016lx\n",
3903 ((unsigned long int *)(buf))[i]);
3904
3905 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3906 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3907 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3908 buf->tcp_ipv4_chksum);
3909 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3910 buf->tcp_ipv6_chksum);
3911 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3912 buf->udp_ipv4_chksum);
3913 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3914 buf->udp_ipv6_chksum);
3915 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3916 buf->large_tx_ipv4);
3917 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3918 buf->large_tx_ipv6);
3919 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3920 buf->large_rx_ipv4);
3921 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3922 buf->large_rx_ipv6);
3923 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3924 buf->max_ipv4_header_size);
3925 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3926 buf->max_ipv6_header_size);
3927 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3928 buf->max_tcp_header_size);
3929 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3930 buf->max_udp_header_size);
3931 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3932 buf->max_large_tx_size);
3933 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3934 buf->max_large_rx_size);
3935 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3936 buf->ipv6_extension_header);
3937 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3938 buf->tcp_pseudosum_req);
3939 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3940 buf->num_ipv6_ext_headers);
3941 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3942 buf->off_ipv6_ext_headers);
3943
3944 adapter->ip_offload_ctrl_tok =
3945 dma_map_single(dev, &adapter->ip_offload_ctrl,
3946 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3947
3948 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3949 dev_err(dev, "Couldn't map ip offload control buffer\n");
3950 return;
3951 }
3952
3953 adapter->ip_offload_ctrl.len =
3954 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3955 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3956 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3957 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3958 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3959 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3960 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3961 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3962 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3963 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3964
3965
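	/* large receive offload is not supported yet, keep it disabled */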
3966 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3967 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3968
3969 if (adapter->state != VNIC_PROBING) {
3970 old_hw_features = adapter->netdev->hw_features;
3971 adapter->netdev->hw_features = 0;
3972 }
3973
3974 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3975
3976 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3977 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3978
3979 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3980 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3981
3982 if ((adapter->netdev->features &
3983 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3984 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3985
3986 if (buf->large_tx_ipv4)
3987 adapter->netdev->hw_features |= NETIF_F_TSO;
3988 if (buf->large_tx_ipv6)
3989 adapter->netdev->hw_features |= NETIF_F_TSO6;
3990
3991 if (adapter->state == VNIC_PROBING) {
3992 adapter->netdev->features |= adapter->netdev->hw_features;
3993 } else if (old_hw_features != adapter->netdev->hw_features) {
3994 netdev_features_t tmp = 0;
3995
3996
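		/* disable features no longer supported */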
3997 adapter->netdev->features &= adapter->netdev->hw_features;
3998
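		/* turn on features now supported if previously enabled */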
3999 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4000 adapter->netdev->hw_features;
4001 adapter->netdev->features |=
4002 tmp & adapter->netdev->wanted_features;
4003 }
4004
4005 memset(&crq, 0, sizeof(crq));
4006 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4007 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4008 crq.control_ip_offload.len =
4009 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4010 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4011 ibmvnic_send_crq(adapter, &crq);
4012}
4013
4014static const char *ibmvnic_fw_err_cause(u16 cause)
4015{
4016 switch (cause) {
4017 case ADAPTER_PROBLEM:
4018 return "adapter problem";
4019 case BUS_PROBLEM:
4020 return "bus problem";
4021 case FW_PROBLEM:
4022 return "firmware problem";
4023 case DD_PROBLEM:
4024 return "device driver problem";
4025 case EEH_RECOVERY:
4026 return "EEH recovery";
4027 case FW_UPDATED:
4028 return "firmware updated";
4029 case LOW_MEMORY:
4030		return "low memory";
4031 default:
4032 return "unknown";
4033 }
4034}
4035
4036static void handle_error_indication(union ibmvnic_crq *crq,
4037 struct ibmvnic_adapter *adapter)
4038{
4039 struct device *dev = &adapter->vdev->dev;
4040 u16 cause;
4041
4042 cause = be16_to_cpu(crq->error_indication.error_cause);
4043
4044 dev_warn_ratelimited(dev,
4045 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4046 crq->error_indication.flags
4047 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4048 ibmvnic_fw_err_cause(cause));
4049
4050 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4051 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4052 else
4053 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4054}
4055
4056static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4057 struct ibmvnic_adapter *adapter)
4058{
4059 struct net_device *netdev = adapter->netdev;
4060 struct device *dev = &adapter->vdev->dev;
4061 long rc;
4062
4063 rc = crq->change_mac_addr_rsp.rc.code;
4064 if (rc) {
4065 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4066 goto out;
4067 }
4068 ether_addr_copy(netdev->dev_addr,
4069 &crq->change_mac_addr_rsp.mac_addr[0]);
4070out:
4071 complete(&adapter->fw_done);
4072 return rc;
4073}
4074
4075static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4076 struct ibmvnic_adapter *adapter)
4077{
4078 struct device *dev = &adapter->vdev->dev;
4079 u64 *req_value;
4080 char *name;
4081
4082 atomic_dec(&adapter->running_cap_crqs);
4083 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4084 case REQ_TX_QUEUES:
4085 req_value = &adapter->req_tx_queues;
4086 name = "tx";
4087 break;
4088 case REQ_RX_QUEUES:
4089 req_value = &adapter->req_rx_queues;
4090 name = "rx";
4091 break;
4092 case REQ_RX_ADD_QUEUES:
4093 req_value = &adapter->req_rx_add_queues;
4094 name = "rx_add";
4095 break;
4096 case REQ_TX_ENTRIES_PER_SUBCRQ:
4097 req_value = &adapter->req_tx_entries_per_subcrq;
4098 name = "tx_entries_per_subcrq";
4099 break;
4100 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4101 req_value = &adapter->req_rx_add_entries_per_subcrq;
4102 name = "rx_add_entries_per_subcrq";
4103 break;
4104 case REQ_MTU:
4105 req_value = &adapter->req_mtu;
4106 name = "mtu";
4107 break;
4108 case PROMISC_REQUESTED:
4109 req_value = &adapter->promisc;
4110 name = "promisc";
4111 break;
4112 default:
4113 dev_err(dev, "Got invalid cap request rsp %d\n",
4114 crq->request_capability.capability);
4115 return;
4116 }
4117
4118 switch (crq->request_capability_rsp.rc.code) {
4119 case SUCCESS:
4120 break;
4121 case PARTIALSUCCESS:
4122 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4123 *req_value,
4124 (long int)be64_to_cpu(crq->request_capability_rsp.
4125 number), name);
4126
4127 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4128 REQ_MTU) {
4129 pr_err("mtu of %llu is not supported. Reverting.\n",
4130 *req_value);
4131 *req_value = adapter->fallback.mtu;
4132 } else {
4133 *req_value =
4134 be64_to_cpu(crq->request_capability_rsp.number);
4135 }
4136
4137 ibmvnic_send_req_caps(adapter, 1);
4138 return;
4139 default:
4140 dev_err(dev, "Error %d in request cap rsp\n",
4141 crq->request_capability_rsp.rc.code);
4142 return;
4143 }
4144
4145
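/* Done receiving requested capabilities, query IP offload support */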
4146 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4147 union ibmvnic_crq newcrq;
4148 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4149 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4150 &adapter->ip_offload_buf;
4151
4152 adapter->wait_capability = false;
4153 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4154 buf_sz,
4155 DMA_FROM_DEVICE);
4156
4157 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4158 if (!firmware_has_feature(FW_FEATURE_CMO))
4159 dev_err(dev, "Couldn't map offload buffer\n");
4160 return;
4161 }
4162
4163 memset(&newcrq, 0, sizeof(newcrq));
4164 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4165 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4166 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4167 newcrq.query_ip_offload.ioba =
4168 cpu_to_be32(adapter->ip_offload_tok);
4169
4170 ibmvnic_send_crq(adapter, &newcrq);
4171 }
4172}
4173
4174static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4175 struct ibmvnic_adapter *adapter)
4176{
4177 struct device *dev = &adapter->vdev->dev;
4178 struct net_device *netdev = adapter->netdev;
4179 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4180 struct ibmvnic_login_buffer *login = adapter->login_buf;
4181 int i;
4182
4183 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4184 DMA_TO_DEVICE);
4185 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4186 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4187
4188
4189
4190
4191
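/* If the number of queues requested can't be allocated by the
 * server, the login response will return with code 1. We will need
 * to resend the login buffer with fewer queues requested.
 */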
4192 if (login_rsp_crq->generic.rc.code) {
4193 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4194 complete(&adapter->init_done);
4195 return 0;
4196 }
4197
4198 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4199
4200 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4201 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4202 netdev_dbg(adapter->netdev, "%016lx\n",
4203 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4204 }
4205
4206
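/* Sanity-check the login response against what was requested */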
4207 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4208 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4209 adapter->req_rx_add_queues !=
4210 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4211 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4212 ibmvnic_remove(adapter->vdev);
4213 return -EIO;
4214 }
4215 release_login_buffer(adapter);
4216 complete(&adapter->init_done);
4217
4218 return 0;
4219}
4220
4221static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4222 struct ibmvnic_adapter *adapter)
4223{
4224 struct device *dev = &adapter->vdev->dev;
4225 long rc;
4226
4227 rc = crq->request_unmap_rsp.rc.code;
4228 if (rc)
4229 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4230}
4231
4232static void handle_query_map_rsp(union ibmvnic_crq *crq,
4233 struct ibmvnic_adapter *adapter)
4234{
4235 struct net_device *netdev = adapter->netdev;
4236 struct device *dev = &adapter->vdev->dev;
4237 long rc;
4238
4239 rc = crq->query_map_rsp.rc.code;
4240 if (rc) {
4241 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4242 return;
4243 }
4244 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4245 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4246 crq->query_map_rsp.free_pages);
4247}
4248
4249static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4250 struct ibmvnic_adapter *adapter)
4251{
4252 struct net_device *netdev = adapter->netdev;
4253 struct device *dev = &adapter->vdev->dev;
4254 long rc;
4255
4256 atomic_dec(&adapter->running_cap_crqs);
4257 netdev_dbg(netdev, "Outstanding queries: %d\n",
4258 atomic_read(&adapter->running_cap_crqs));
4259 rc = crq->query_capability.rc.code;
4260 if (rc) {
4261 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4262 goto out;
4263 }
4264
4265 switch (be16_to_cpu(crq->query_capability.capability)) {
4266 case MIN_TX_QUEUES:
4267 adapter->min_tx_queues =
4268 be64_to_cpu(crq->query_capability.number);
4269 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4270 adapter->min_tx_queues);
4271 break;
4272 case MIN_RX_QUEUES:
4273 adapter->min_rx_queues =
4274 be64_to_cpu(crq->query_capability.number);
4275 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4276 adapter->min_rx_queues);
4277 break;
4278 case MIN_RX_ADD_QUEUES:
4279 adapter->min_rx_add_queues =
4280 be64_to_cpu(crq->query_capability.number);
4281 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4282 adapter->min_rx_add_queues);
4283 break;
4284 case MAX_TX_QUEUES:
4285 adapter->max_tx_queues =
4286 be64_to_cpu(crq->query_capability.number);
4287 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4288 adapter->max_tx_queues);
4289 break;
4290 case MAX_RX_QUEUES:
4291 adapter->max_rx_queues =
4292 be64_to_cpu(crq->query_capability.number);
4293 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4294 adapter->max_rx_queues);
4295 break;
4296 case MAX_RX_ADD_QUEUES:
4297 adapter->max_rx_add_queues =
4298 be64_to_cpu(crq->query_capability.number);
4299 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4300 adapter->max_rx_add_queues);
4301 break;
4302 case MIN_TX_ENTRIES_PER_SUBCRQ:
4303 adapter->min_tx_entries_per_subcrq =
4304 be64_to_cpu(crq->query_capability.number);
4305 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4306 adapter->min_tx_entries_per_subcrq);
4307 break;
4308 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4309 adapter->min_rx_add_entries_per_subcrq =
4310 be64_to_cpu(crq->query_capability.number);
4311 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4312 adapter->min_rx_add_entries_per_subcrq);
4313 break;
4314 case MAX_TX_ENTRIES_PER_SUBCRQ:
4315 adapter->max_tx_entries_per_subcrq =
4316 be64_to_cpu(crq->query_capability.number);
4317 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4318 adapter->max_tx_entries_per_subcrq);
4319 break;
4320 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4321 adapter->max_rx_add_entries_per_subcrq =
4322 be64_to_cpu(crq->query_capability.number);
4323 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4324 adapter->max_rx_add_entries_per_subcrq);
4325 break;
4326 case TCP_IP_OFFLOAD:
4327 adapter->tcp_ip_offload =
4328 be64_to_cpu(crq->query_capability.number);
4329 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4330 adapter->tcp_ip_offload);
4331 break;
4332 case PROMISC_SUPPORTED:
4333 adapter->promisc_supported =
4334 be64_to_cpu(crq->query_capability.number);
4335 netdev_dbg(netdev, "promisc_supported = %lld\n",
4336 adapter->promisc_supported);
4337 break;
4338 case MIN_MTU:
4339 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4340 netdev->extended->min_mtu = adapter->min_mtu - ETH_HLEN;
4341 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4342 break;
4343 case MAX_MTU:
4344 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4345 netdev->extended->max_mtu = adapter->max_mtu - ETH_HLEN;
4346 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4347 break;
4348 case MAX_MULTICAST_FILTERS:
4349 adapter->max_multicast_filters =
4350 be64_to_cpu(crq->query_capability.number);
4351 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4352 adapter->max_multicast_filters);
4353 break;
4354 case VLAN_HEADER_INSERTION:
4355 adapter->vlan_header_insertion =
4356 be64_to_cpu(crq->query_capability.number);
4357 if (adapter->vlan_header_insertion)
4358 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4359 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4360 adapter->vlan_header_insertion);
4361 break;
4362 case RX_VLAN_HEADER_INSERTION:
4363 adapter->rx_vlan_header_insertion =
4364 be64_to_cpu(crq->query_capability.number);
4365 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4366 adapter->rx_vlan_header_insertion);
4367 break;
4368 case MAX_TX_SG_ENTRIES:
4369 adapter->max_tx_sg_entries =
4370 be64_to_cpu(crq->query_capability.number);
4371 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4372 adapter->max_tx_sg_entries);
4373 break;
4374 case RX_SG_SUPPORTED:
4375 adapter->rx_sg_supported =
4376 be64_to_cpu(crq->query_capability.number);
4377 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4378 adapter->rx_sg_supported);
4379 break;
4380 case OPT_TX_COMP_SUB_QUEUES:
4381 adapter->opt_tx_comp_sub_queues =
4382 be64_to_cpu(crq->query_capability.number);
4383 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4384 adapter->opt_tx_comp_sub_queues);
4385 break;
4386 case OPT_RX_COMP_QUEUES:
4387 adapter->opt_rx_comp_queues =
4388 be64_to_cpu(crq->query_capability.number);
4389 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4390 adapter->opt_rx_comp_queues);
4391 break;
4392 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4393 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4394 be64_to_cpu(crq->query_capability.number);
4395 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4396 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4397 break;
4398 case OPT_TX_ENTRIES_PER_SUBCRQ:
4399 adapter->opt_tx_entries_per_subcrq =
4400 be64_to_cpu(crq->query_capability.number);
4401 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4402 adapter->opt_tx_entries_per_subcrq);
4403 break;
4404 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4405 adapter->opt_rxba_entries_per_subcrq =
4406 be64_to_cpu(crq->query_capability.number);
4407 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4408 adapter->opt_rxba_entries_per_subcrq);
4409 break;
4410 case TX_RX_DESC_REQ:
4411 adapter->tx_rx_desc_req = crq->query_capability.number;
4412 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4413 adapter->tx_rx_desc_req);
4414 break;
4415
4416 default:
4417 netdev_err(netdev, "Got invalid cap rsp %d\n",
4418 crq->query_capability.capability);
4419 }
4420
4421out:
4422 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4423 adapter->wait_capability = false;
4424 ibmvnic_send_req_caps(adapter, 0);
4425 }
4426}
4427
4428static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4429{
4430 union ibmvnic_crq crq;
4431 int rc;
4432
4433 memset(&crq, 0, sizeof(crq));
4434 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4435 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4436 init_completion(&adapter->fw_done);
4437 rc = ibmvnic_send_crq(adapter, &crq);
4438 if (rc)
4439 return rc;
4440 wait_for_completion(&adapter->fw_done);
4441 return adapter->fw_done_rc ? -EIO : 0;
4442}
4443
4444static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4445 struct ibmvnic_adapter *adapter)
4446{
4447 struct net_device *netdev = adapter->netdev;
4448 int rc;
4449 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
4450
4451 rc = crq->query_phys_parms_rsp.rc.code;
4452 if (rc) {
4453 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4454 return rc;
4455 }
4456 switch (rspeed) {
4457 case IBMVNIC_10MBPS:
4458 adapter->speed = SPEED_10;
4459 break;
4460 case IBMVNIC_100MBPS:
4461 adapter->speed = SPEED_100;
4462 break;
4463 case IBMVNIC_1GBPS:
4464 adapter->speed = SPEED_1000;
4465 break;
4466 case IBMVNIC_10GBP:
4467 adapter->speed = SPEED_10000;
4468 break;
4469 case IBMVNIC_25GBPS:
4470 adapter->speed = SPEED_25000;
4471 break;
4472 case IBMVNIC_40GBPS:
4473 adapter->speed = SPEED_40000;
4474 break;
4475 case IBMVNIC_50GBPS:
4476 adapter->speed = SPEED_50000;
4477 break;
4478 case IBMVNIC_100GBPS:
4479 adapter->speed = SPEED_100000;
4480 break;
4481 default:
4482 if (netif_carrier_ok(netdev))
4483 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4484 adapter->speed = SPEED_UNKNOWN;
4485 }
4486 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4487 adapter->duplex = DUPLEX_FULL;
4488 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4489 adapter->duplex = DUPLEX_HALF;
4490 else
4491 adapter->duplex = DUPLEX_UNKNOWN;
4492
4493 return rc;
4494}
4495
4496static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4497 struct ibmvnic_adapter *adapter)
4498{
4499 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4500 struct net_device *netdev = adapter->netdev;
4501 struct device *dev = &adapter->vdev->dev;
4502 u64 *u64_crq = (u64 *)crq;
4503 long rc;
4504
4505 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4506 (unsigned long int)cpu_to_be64(u64_crq[0]),
4507 (unsigned long int)cpu_to_be64(u64_crq[1]));
4508 switch (gen_crq->first) {
4509 case IBMVNIC_CRQ_INIT_RSP:
4510 switch (gen_crq->cmd) {
4511 case IBMVNIC_CRQ_INIT:
4512 dev_info(dev, "Partner initialized\n");
4513 adapter->from_passive_init = true;
4514 adapter->failover_pending = false;
4515 if (!completion_done(&adapter->init_done)) {
4516 complete(&adapter->init_done);
4517 adapter->init_done_rc = -EIO;
4518 }
4519 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4520 break;
4521 case IBMVNIC_CRQ_INIT_COMPLETE:
4522 dev_info(dev, "Partner initialization complete\n");
4523 adapter->crq.active = true;
4524 send_version_xchg(adapter);
4525 break;
4526 default:
4527 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4528 }
4529 return;
4530 case IBMVNIC_CRQ_XPORT_EVENT:
4531 netif_carrier_off(netdev);
4532 adapter->crq.active = false;
4533 if (test_bit(0, &adapter->resetting))
4534 adapter->force_reset_recovery = true;
4535 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4536 dev_info(dev, "Migrated, re-enabling adapter\n");
4537 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4538 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4539 dev_info(dev, "Backing device failover detected\n");
4540 adapter->failover_pending = true;
4541 } else {
4542
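/* The adapter lost the connection */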
4543 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4544 gen_crq->cmd);
4545 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4546 }
4547 return;
4548 case IBMVNIC_CRQ_CMD_RSP:
4549 break;
4550 default:
4551 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4552 gen_crq->first);
4553 return;
4554 }
4555
4556 switch (gen_crq->cmd) {
4557 case VERSION_EXCHANGE_RSP:
4558 rc = crq->version_exchange_rsp.rc.code;
4559 if (rc) {
4560 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4561 break;
4562 }
4563 dev_info(dev, "Partner protocol version is %d\n",
4564 crq->version_exchange_rsp.version);
4565 if (be16_to_cpu(crq->version_exchange_rsp.version) <
4566 ibmvnic_version)
4567 ibmvnic_version =
4568 be16_to_cpu(crq->version_exchange_rsp.version);
4569 send_cap_queries(adapter);
4570 break;
4571 case QUERY_CAPABILITY_RSP:
4572 handle_query_cap_rsp(crq, adapter);
4573 break;
4574 case QUERY_MAP_RSP:
4575 handle_query_map_rsp(crq, adapter);
4576 break;
4577 case REQUEST_MAP_RSP:
4578 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4579 complete(&adapter->fw_done);
4580 break;
4581 case REQUEST_UNMAP_RSP:
4582 handle_request_unmap_rsp(crq, adapter);
4583 break;
4584 case REQUEST_CAPABILITY_RSP:
4585 handle_request_cap_rsp(crq, adapter);
4586 break;
4587 case LOGIN_RSP:
4588 netdev_dbg(netdev, "Got Login Response\n");
4589 handle_login_rsp(crq, adapter);
4590 break;
4591 case LOGICAL_LINK_STATE_RSP:
4592 netdev_dbg(netdev,
4593 "Got Logical Link State Response, state: %d rc: %d\n",
4594 crq->logical_link_state_rsp.link_state,
4595 crq->logical_link_state_rsp.rc.code);
4596 adapter->logical_link_state =
4597 crq->logical_link_state_rsp.link_state;
4598 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4599 complete(&adapter->init_done);
4600 break;
4601 case LINK_STATE_INDICATION:
4602 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4603 adapter->phys_link_state =
4604 crq->link_state_indication.phys_link_state;
4605 adapter->logical_link_state =
4606 crq->link_state_indication.logical_link_state;
4607 if (adapter->phys_link_state && adapter->logical_link_state)
4608 netif_carrier_on(netdev);
4609 else
4610 netif_carrier_off(netdev);
4611 break;
4612 case CHANGE_MAC_ADDR_RSP:
4613 netdev_dbg(netdev, "Got MAC address change Response\n");
4614 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4615 break;
4616 case ERROR_INDICATION:
4617 netdev_dbg(netdev, "Got Error Indication\n");
4618 handle_error_indication(crq, adapter);
4619 break;
4620 case REQUEST_STATISTICS_RSP:
4621 netdev_dbg(netdev, "Got Statistics Response\n");
4622 complete(&adapter->stats_done);
4623 break;
4624 case QUERY_IP_OFFLOAD_RSP:
4625 netdev_dbg(netdev, "Got Query IP offload Response\n");
4626 handle_query_ip_offload_rsp(adapter);
4627 break;
4628 case MULTICAST_CTRL_RSP:
4629 netdev_dbg(netdev, "Got multicast control Response\n");
4630 break;
4631 case CONTROL_IP_OFFLOAD_RSP:
4632 netdev_dbg(netdev, "Got Control IP offload Response\n");
4633 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4634 sizeof(adapter->ip_offload_ctrl),
4635 DMA_TO_DEVICE);
4636 complete(&adapter->init_done);
4637 break;
4638 case COLLECT_FW_TRACE_RSP:
4639 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4640 complete(&adapter->fw_done);
4641 break;
4642 case GET_VPD_SIZE_RSP:
4643 handle_vpd_size_rsp(crq, adapter);
4644 break;
4645 case GET_VPD_RSP:
4646 handle_vpd_rsp(crq, adapter);
4647 break;
4648 case QUERY_PHYS_PARMS_RSP:
4649 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4650 complete(&adapter->fw_done);
4651 break;
4652 default:
4653 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4654 gen_crq->cmd);
4655 }
4656}
4657
4658static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4659{
4660 struct ibmvnic_adapter *adapter = instance;
4661
4662 tasklet_schedule(&adapter->tasklet);
4663 return IRQ_HANDLED;
4664}
4665
4666static void ibmvnic_tasklet(void *data)
4667{
4668 struct ibmvnic_adapter *adapter = data;
4669 struct ibmvnic_crq_queue *queue = &adapter->crq;
4670 union ibmvnic_crq *crq;
4671 unsigned long flags;
4672 bool done = false;
4673
4674 spin_lock_irqsave(&queue->lock, flags);
4675 while (!done) {
4676
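/* Pull all the valid messages off the CRQ */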
4677 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4678 ibmvnic_handle_crq(crq, adapter);
4679 crq->generic.first = 0;
4680 }
4681
4682
4683
4684
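/* remain in tasklet until all
 * capabilities responses are received
 */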
4685 if (!adapter->wait_capability)
4686 done = true;
4687 }
4688
4689
4690
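/* if capabilities CRQs were sent in this tasklet, the following
 * tasklet must wait until all responses are received
 */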
4691 if (atomic_read(&adapter->running_cap_crqs) != 0)
4692 adapter->wait_capability = true;
4693 spin_unlock_irqrestore(&queue->lock, flags);
4694}
4695
4696static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4697{
4698 struct vio_dev *vdev = adapter->vdev;
4699 int rc;
4700
4701 do {
4702 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4703 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4704
4705 if (rc)
4706 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4707
4708 return rc;
4709}
4710
4711static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4712{
4713 struct ibmvnic_crq_queue *crq = &adapter->crq;
4714 struct device *dev = &adapter->vdev->dev;
4715 struct vio_dev *vdev = adapter->vdev;
4716 int rc;
4717
4718
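/* Close the CRQ */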
4719 do {
4720 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4721 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4722
4723
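/* Clean out the queue */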
4724 memset(crq->msgs, 0, PAGE_SIZE);
4725 crq->cur = 0;
4726 crq->active = false;
4727
4728
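/* And re-open it again */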
4729 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4730 crq->msg_token, PAGE_SIZE);
4731
4732 if (rc == H_CLOSED)
4733
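/* Adapter is good, but other end is not ready */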
4734 dev_warn(dev, "Partner adapter not ready\n");
4735 else if (rc != 0)
4736 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4737
4738 return rc;
4739}
4740
4741static void release_crq_queue(struct ibmvnic_adapter *adapter)
4742{
4743 struct ibmvnic_crq_queue *crq = &adapter->crq;
4744 struct vio_dev *vdev = adapter->vdev;
4745 long rc;
4746
4747 if (!crq->msgs)
4748 return;
4749
4750 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4751 free_irq(vdev->irq, adapter);
4752 tasklet_kill(&adapter->tasklet);
4753 do {
4754 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4755 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4756
4757 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4758 DMA_BIDIRECTIONAL);
4759 free_page((unsigned long)crq->msgs);
4760 crq->msgs = NULL;
4761 crq->active = false;
4762}
4763
4764static int init_crq_queue(struct ibmvnic_adapter *adapter)
4765{
4766 struct ibmvnic_crq_queue *crq = &adapter->crq;
4767 struct device *dev = &adapter->vdev->dev;
4768 struct vio_dev *vdev = adapter->vdev;
4769 int rc, retrc = -ENOMEM;
4770
4771 if (crq->msgs)
4772 return 0;
4773
4774 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
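/* Should we allocate more than one crq? */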
4775
4776
4777 if (!crq->msgs)
4778 return -ENOMEM;
4779
4780 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4781 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4782 DMA_BIDIRECTIONAL);
4783 if (dma_mapping_error(dev, crq->msg_token))
4784 goto map_failed;
4785
4786 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4787 crq->msg_token, PAGE_SIZE);
4788
4789 if (rc == H_RESOURCE)
4790
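/* maybe kexecing and resource is busy. try a reset */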
4791 rc = ibmvnic_reset_crq(adapter);
4792 retrc = rc;
4793
4794 if (rc == H_CLOSED) {
4795 dev_warn(dev, "Partner adapter not ready\n");
4796 } else if (rc) {
4797 dev_warn(dev, "Error %d opening adapter\n", rc);
4798 goto reg_crq_failed;
4799 }
4800
4801 retrc = 0;
4802
4803 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4804 (unsigned long)adapter);
4805
4806 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4807 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4808 adapter);
4809 if (rc) {
4810 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4811 vdev->irq, rc);
4812 goto req_irq_failed;
4813 }
4814
4815 rc = vio_enable_interrupts(vdev);
4816 if (rc) {
4817 dev_err(dev, "Error %d enabling interrupts\n", rc);
4818 goto req_irq_failed;
4819 }
4820
4821 crq->cur = 0;
4822 spin_lock_init(&crq->lock);
4823
4824 return retrc;
4825
4826req_irq_failed:
4827 tasklet_kill(&adapter->tasklet);
4828 do {
4829 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4830 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4831reg_crq_failed:
4832 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4833map_failed:
4834 free_page((unsigned long)crq->msgs);
4835 crq->msgs = NULL;
4836 return retrc;
4837}
4838
4839static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4840{
4841 struct device *dev = &adapter->vdev->dev;
4842 unsigned long timeout = msecs_to_jiffies(30000);
4843 u64 old_num_rx_queues, old_num_tx_queues;
4844 int rc;
4845
4846 adapter->from_passive_init = false;
4847
4848 old_num_rx_queues = adapter->req_rx_queues;
4849 old_num_tx_queues = adapter->req_tx_queues;
4850
4851 reinit_completion(&adapter->init_done);
4852 adapter->init_done_rc = 0;
4853 ibmvnic_send_crq_init(adapter);
4854 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4855 dev_err(dev, "Initialization sequence timed out\n");
4856 return -1;
4857 }
4858
4859 if (adapter->init_done_rc) {
4860 release_crq_queue(adapter);
4861 return adapter->init_done_rc;
4862 }
4863
4864 if (adapter->from_passive_init) {
4865 adapter->state = VNIC_OPEN;
4866 adapter->from_passive_init = false;
4867 return -1;
4868 }
4869
4870 if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
4871 adapter->reset_reason != VNIC_RESET_MOBILITY) {
4872 if (adapter->req_rx_queues != old_num_rx_queues ||
4873 adapter->req_tx_queues != old_num_tx_queues) {
4874 release_sub_crqs(adapter, 0);
4875 rc = init_sub_crqs(adapter);
4876 } else {
4877 rc = reset_sub_crq_queues(adapter);
4878 }
4879 } else {
4880 rc = init_sub_crqs(adapter);
4881 }
4882
4883 if (rc) {
4884 dev_err(dev, "Initialization of sub crqs failed\n");
4885 release_crq_queue(adapter);
4886 return rc;
4887 }
4888
4889 rc = init_sub_crq_irqs(adapter);
4890 if (rc) {
4891 dev_err(dev, "Failed to initialize sub crq irqs\n");
4892 release_crq_queue(adapter);
4893 }
4894
4895 return rc;
4896}
4897
4898static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4899{
4900 struct device *dev = &adapter->vdev->dev;
4901 unsigned long timeout = msecs_to_jiffies(30000);
4902 int rc;
4903
4904 adapter->from_passive_init = false;
4905
4906 adapter->init_done_rc = 0;
4907 ibmvnic_send_crq_init(adapter);
4908 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4909 dev_err(dev, "Initialization sequence timed out\n");
4910 return -1;
4911 }
4912
4913 if (adapter->init_done_rc) {
4914 release_crq_queue(adapter);
4915 return adapter->init_done_rc;
4916 }
4917
4918 if (adapter->from_passive_init) {
4919 adapter->state = VNIC_OPEN;
4920 adapter->from_passive_init = false;
4921 return -1;
4922 }
4923
4924 rc = init_sub_crqs(adapter);
4925 if (rc) {
4926 dev_err(dev, "Initialization of sub crqs failed\n");
4927 release_crq_queue(adapter);
4928 return rc;
4929 }
4930
4931 rc = init_sub_crq_irqs(adapter);
4932 if (rc) {
4933 dev_err(dev, "Failed to initialize sub crq irqs\n");
4934 release_crq_queue(adapter);
4935 }
4936
4937 return rc;
4938}
4939
4940static struct device_attribute dev_attr_failover;
4941
4942static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4943{
4944 struct ibmvnic_adapter *adapter;
4945 struct net_device *netdev;
4946 unsigned char *mac_addr_p;
4947 int rc;
4948
4949 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4950 dev->unit_address);
4951
4952 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4953 VETH_MAC_ADDR, NULL);
4954 if (!mac_addr_p) {
4955 dev_err(&dev->dev,
4956 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4957 __FILE__, __LINE__);
4958 return 0;
4959 }
4960
4961 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4962 IBMVNIC_MAX_QUEUES);
4963 if (!netdev)
4964 return -ENOMEM;
4965
4966 adapter = netdev_priv(netdev);
4967 adapter->state = VNIC_PROBING;
4968 dev_set_drvdata(&dev->dev, netdev);
4969 adapter->vdev = dev;
4970 adapter->netdev = netdev;
4971
4972 ether_addr_copy(adapter->mac_addr, mac_addr_p);
4973 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4974 netdev->irq = dev->irq;
4975 netdev->netdev_ops = &ibmvnic_netdev_ops;
4976 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4977 SET_NETDEV_DEV(netdev, &dev->dev);
4978
4979 spin_lock_init(&adapter->stats_lock);
4980
4981 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4982 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
4983 __ibmvnic_delayed_reset);
4984 INIT_LIST_HEAD(&adapter->rwi_list);
4985 spin_lock_init(&adapter->rwi_lock);
4986 init_completion(&adapter->init_done);
4987 clear_bit(0, &adapter->resetting);
4988
4989 do {
4990 rc = init_crq_queue(adapter);
4991 if (rc) {
4992 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4993 rc);
4994 goto ibmvnic_init_fail;
4995 }
4996
4997 rc = ibmvnic_init(adapter);
4998 if (rc && rc != EAGAIN)
4999 goto ibmvnic_init_fail;
5000 } while (rc == EAGAIN);
5001
5002 rc = init_stats_buffers(adapter);
5003 if (rc)
5004 goto ibmvnic_init_fail;
5005
5006 rc = init_stats_token(adapter);
5007 if (rc)
5008 goto ibmvnic_stats_fail;
5009
5010 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5011 netdev->extended->min_mtu = adapter->min_mtu - ETH_HLEN;
5012 netdev->extended->max_mtu = adapter->max_mtu - ETH_HLEN;
5013
5014 rc = device_create_file(&dev->dev, &dev_attr_failover);
5015 if (rc)
5016 goto ibmvnic_dev_file_err;
5017
5018 netif_carrier_off(netdev);
5019 rc = register_netdev(netdev);
5020 if (rc) {
5021 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5022 goto ibmvnic_register_fail;
5023 }
5024 dev_info(&dev->dev, "ibmvnic registered\n");
5025
5026 adapter->state = VNIC_PROBED;
5027
5028 adapter->wait_for_reset = false;
5029
5030 return 0;
5031
5032ibmvnic_register_fail:
5033 device_remove_file(&dev->dev, &dev_attr_failover);
5034
5035ibmvnic_dev_file_err:
5036 release_stats_token(adapter);
5037
5038ibmvnic_stats_fail:
5039 release_stats_buffers(adapter);
5040
5041ibmvnic_init_fail:
5042 release_sub_crqs(adapter, 1);
5043 release_crq_queue(adapter);
5044 free_netdev(netdev);
5045
5046 return rc;
5047}
5048
5049static int ibmvnic_remove(struct vio_dev *dev)
5050{
5051 struct net_device *netdev = dev_get_drvdata(&dev->dev);
5052 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5053
5054 adapter->state = VNIC_REMOVING;
5055 rtnl_lock();
5056 unregister_netdevice(netdev);
5057
5058 release_resources(adapter);
5059 release_sub_crqs(adapter, 1);
5060 release_crq_queue(adapter);
5061
5062 release_stats_token(adapter);
5063 release_stats_buffers(adapter);
5064
5065 adapter->state = VNIC_REMOVED;
5066
5067 rtnl_unlock();
5068 device_remove_file(&dev->dev, &dev_attr_failover);
5069 free_netdev(netdev);
5070 dev_set_drvdata(&dev->dev, NULL);
5071
5072 return 0;
5073}
5074
5075static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5076 const char *buf, size_t count)
5077{
5078 struct net_device *netdev = dev_get_drvdata(dev);
5079 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5080 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5081 __be64 session_token;
5082 long rc;
5083
5084 if (!sysfs_streq(buf, "1"))
5085 return -EINVAL;
5086
5087 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5088 H_GET_SESSION_TOKEN, 0, 0, 0);
5089 if (rc) {
5090 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5091 rc);
5092 return -EINVAL;
5093 }
5094
5095 session_token = (__be64)retbuf[0];
5096 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5097 be64_to_cpu(session_token));
5098 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5099 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5100 if (rc) {
5101 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5102 rc);
5103 return -EINVAL;
5104 }
5105
5106 return count;
5107}
5108
5109static DEVICE_ATTR(failover, 0200, NULL, failover_store);
5110
5111static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5112{
5113 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5114 struct ibmvnic_adapter *adapter;
5115 struct iommu_table *tbl;
5116 unsigned long ret = 0;
5117 int i;
5118
5119 tbl = get_iommu_table_base(&vdev->dev);
5120
5121
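/* netdev inits at probe time along with the structures we need below */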
5122 if (!netdev)
5123 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5124
5125 adapter = netdev_priv(netdev);
5126
5127 ret += PAGE_SIZE; /* get crq page */
5128 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5129
5130 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5131 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5132
5133 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5134 i++)
5135 ret += adapter->rx_pool[i].size *
5136 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5137
5138 return ret;
5139}
5140
5141static int ibmvnic_resume(struct device *dev)
5142{
5143 struct net_device *netdev = dev_get_drvdata(dev);
5144 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5145
5146 if (adapter->state != VNIC_OPEN)
5147 return 0;
5148
5149 tasklet_schedule(&adapter->tasklet);
5150
5151 return 0;
5152}
5153
5154static const struct vio_device_id ibmvnic_device_table[] = {
5155 {"network", "IBM,vnic"},
5156 {"", "" }
5157};
5158MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5159
5160static const struct dev_pm_ops ibmvnic_pm_ops = {
5161 .resume = ibmvnic_resume
5162};
5163
5164static struct vio_driver ibmvnic_driver = {
5165 .id_table = ibmvnic_device_table,
5166 .probe = ibmvnic_probe,
5167 .remove = ibmvnic_remove,
5168 .get_desired_dma = ibmvnic_get_desired_dma,
5169 .name = ibmvnic_driver_name,
5170 .pm = &ibmvnic_pm_ops,
5171};
5172
5173
5174static int __init ibmvnic_module_init(void)
5175{
5176 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5177 IBMVNIC_DRIVER_VERSION);
5178
5179 return vio_register_driver(&ibmvnic_driver);
5180}
5181
5182static void __exit ibmvnic_module_exit(void)
5183{
5184 vio_unregister_driver(&ibmvnic_driver);
5185}
5186
5187module_init(ibmvnic_module_init);
5188module_exit(ibmvnic_module_exit);
5189