/* IBM System i/p Virtual NIC Driver */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
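
/* The two macros above drive the ethtool statistics table below: each entry
 * in ibmvnic_stats records a display name plus the byte offset of a u64
 * counter inside struct ibmvnic_adapter, and IBMVNIC_GET_STAT dereferences
 * that offset at runtime. A minimal sketch of the intended use (loop body
 * illustrative only, not code from this file):
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 */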

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

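/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call. On success the
 * hypervisor returns the number assigned to the new sub-CRQ and the virtual
 * IRQ to use for it; both are passed back through the out parameters.
 */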
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

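/* Long term buffers (LTBs) are large DMA-coherent regions that back the rx
 * and tx pools. Allocation is a two step handshake: allocate the region,
 * then send a REQUEST_MAP CRQ so firmware can map it, waiting on fw_done
 * for the response. The map_id recorded here identifies the buffer in
 * later sub-CRQ descriptors.
 */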
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

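/* Refill an rx pool up to its configured size: for each free slot, stage an
 * skb, point the slot's data at the matching offset inside the pool's long
 * term buffer, and post an rx-add sub-CRQ descriptor so firmware can fill
 * it. On a failed post the slot is rolled back and, if the queue is closed
 * or a failover is pending, the pools are deactivated until reset.
 */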
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Stage the buffer in the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

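/* Allocate one rx pool per rx-add sub-CRQ negotiated at login. Each pool
 * owns a free_map, an array of rx_buff trackers and one long term buffer
 * sized size * buff_size, with the per-queue buffer size taken from the
 * login response.
 */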
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

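/* Every tx queue gets a pair of pools: a standard pool sized for MTU-sized
 * frames and a separate pool of IBMVNIC_TSO_BUFS buffers of
 * IBMVNIC_TSO_BUF_SZ bytes for TSO frames. init_one_tx_pool() below builds
 * one such pool; init_tx_pools() builds both for each queue.
 */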
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

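/* Log in to the vNIC server. A PARTIALSUCCESS response means the server
 * wants different resource values, so the driver releases its sub-CRQs,
 * re-runs capability negotiation and retries, giving up after
 * IBMVNIC_MAX_QUEUES attempts. Each wait is bounded by a 30 second timeout
 * on init_done.
 */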
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	/* Apply any MAC address change requested while the device was down */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of individual header lengths
 * @hdr_data: buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer holding the skb and the indirect descriptor array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

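/* TX path. Each frame is copied whole (head plus frags) into a free slot of
 * the queue's long term buffer, so no per-skb DMA mapping is needed. A
 * single v1 TX sub-CRQ descriptor then points at that slot; if header
 * descriptors are required (checksum offload or TSO with the relevant
 * hdr_field bit set), the descriptor plus headers are sent as one indirect
 * array via send_subcrq_indirect() instead of a direct send_subcrq().
 */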
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Send request to clear all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Send request to add each multicast address */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	return adapter->fw_done_rc ? -EIO : 0;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	rc = __ibmvnic_set_mac(netdev, addr);

	return rc;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are opening the adapter again
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (!rc)
				rc = vio_enable_interrupts(adapter->vdev);
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);
			return rc;
		}

		rc = ibmvnic_reset_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			return rc;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   old_num_rx_slots ||
			   adapter->req_tx_entries_per_subcrq !=
			   old_num_tx_slots) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				return rc;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

	netif_carrier_on(netdev);

	return 0;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are opening the adapter again
	 */
	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_init(adapter);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

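/* Reset worker, run from the ibmvnic_reset work item. Drains the rwi (reset
 * work item) list, applying do_hard_reset() when force_reset_recovery is
 * set and do_reset() otherwise, and completes reset_done for callers that
 * are blocked in wait_for_reset().
 */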
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	bool we_lock_rtnl = false;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	/* netif_set_real_num_xx_queues needs to take rtnl lock here
	 * unless wait_for_reset is set, in which case the rtnl lock
	 * has already been taken before initializing the reset
	 */
	if (!adapter->wait_for_reset) {
		rtnl_lock();
		we_lock_rtnl = true;
	}
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		if (adapter->force_reset_recovery) {
			adapter->force_reset_recovery = false;
			rc = do_hard_reset(adapter, rwi, reset_state);
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED &&
		    !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
	}

	adapter->resetting = false;
	if (we_lock_rtnl)
		rtnl_unlock();
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}

	/* if we just received a transport event, flush reset queue
	 * and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	adapter->resetting = true;
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

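/* NAPI poll handler. Each rx completion carries the address of its
 * ibmvnic_rx_buff in the correlator field; the payload is copied out of the
 * long term buffer into the staged skb, the buffer slot is returned to the
 * pool, and the pool is replenished once the budget loop ends.
 */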
2106static int ibmvnic_poll(struct napi_struct *napi, int budget)
2107{
2108 struct net_device *netdev = napi->dev;
2109 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2110 int scrq_num = (int)(napi - adapter->napi);
2111 int frames_processed = 0;
2112
2113restart_poll:
2114 while (frames_processed < budget) {
2115 struct sk_buff *skb;
2116 struct ibmvnic_rx_buff *rx_buff;
2117 union sub_crq *next;
2118 u32 length;
2119 u16 offset;
2120 u8 flags = 0;
2121
2122 if (unlikely(adapter->resetting &&
2123 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2124 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2125 napi_complete_done(napi, frames_processed);
2126 return frames_processed;
2127 }
2128
2129 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2130 break;
2131 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2132 rx_buff =
2133 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2134 rx_comp.correlator);
2135
2136 if (next->rx_comp.rc) {
2137 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2138 be16_to_cpu(next->rx_comp.rc));
2139
2140 next->rx_comp.first = 0;
2141 dev_kfree_skb_any(rx_buff->skb);
2142 remove_buff_from_pool(adapter, rx_buff);
2143 continue;
2144 } else if (!rx_buff->skb) {
2145
2146 next->rx_comp.first = 0;
2147 remove_buff_from_pool(adapter, rx_buff);
2148 continue;
2149 }
2150
2151 length = be32_to_cpu(next->rx_comp.len);
2152 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2153 flags = next->rx_comp.flags;
2154 skb = rx_buff->skb;
2155 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2156 length);
2157
2158
2159
2160
2161 if (adapter->rx_vlan_header_insertion &&
2162 (flags & IBMVNIC_VLAN_STRIPPED))
2163 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
2168 remove_buff_from_pool(adapter, rx_buff);
2169
2170 skb_put(skb, length);
2171 skb->protocol = eth_type_trans(skb, netdev);
2172 skb_record_rx_queue(skb, scrq_num);
2173
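		/* Both the IP and TCP/UDP checksums were verified by the
		 * device, so tell the stack not to re-verify them.
		 */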
2174 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2175 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2176 skb->ip_summed = CHECKSUM_UNNECESSARY;
2177 }
2178
2179 length = skb->len;
2180 napi_gro_receive(napi, skb);
2181 netdev->stats.rx_packets++;
2182 netdev->stats.rx_bytes += length;
2183 adapter->rx_stats_buffers[scrq_num].packets++;
2184 adapter->rx_stats_buffers[scrq_num].bytes += length;
2185 frames_processed++;
2186 }
2187
2188 if (adapter->state != VNIC_CLOSING)
2189 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2190
2191 if (frames_processed < budget) {
2192 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2193 napi_complete_done(napi, frames_processed);
2194 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2195 napi_reschedule(napi)) {
2196 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2197 goto restart_poll;
2198 }
2199 }
2200 return frames_processed;
2201}
2202
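/* Save the current ring/queue/MTU settings, request a CHANGE_PARAM
 * reset, and wait for it to complete.  If the new parameters are
 * rejected, restore the saved fallback values and reset once more so
 * the adapter comes back with a working configuration.
 */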
2203static int wait_for_reset(struct ibmvnic_adapter *adapter)
2204{
2205 int rc, ret;
2206
2207 adapter->fallback.mtu = adapter->req_mtu;
2208 adapter->fallback.rx_queues = adapter->req_rx_queues;
2209 adapter->fallback.tx_queues = adapter->req_tx_queues;
2210 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2211 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2212
2213 init_completion(&adapter->reset_done);
2214 adapter->wait_for_reset = true;
2215 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2216 if (rc)
2217 return rc;
2218 wait_for_completion(&adapter->reset_done);
2219
2220 ret = 0;
2221 if (adapter->reset_done_rc) {
2222 ret = -EIO;
2223 adapter->desired.mtu = adapter->fallback.mtu;
2224 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2225 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2226 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2227 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2228
2229 init_completion(&adapter->reset_done);
2230 adapter->wait_for_reset = true;
2231 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2232 if (rc)
2233 return ret;
2234 wait_for_completion(&adapter->reset_done);
2235 }
2236 adapter->wait_for_reset = false;
2237
2238 return ret;
2239}
2240
2241static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2242{
2243 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2244
2245 adapter->desired.mtu = new_mtu + ETH_HLEN;
2246
2247 return wait_for_reset(adapter);
2248}
2249
2250static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2251 struct net_device *dev,
2252 netdev_features_t features)
2253{
	/* Some backing hardware adapters can not
	 * handle packets with a MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
2259 if (skb_shinfo(skb)->gso_size < 224 ||
2260 skb_shinfo(skb)->gso_segs == 1)
2261 features &= ~NETIF_F_GSO_MASK;
2262 }
2263
2264 return features;
2265}
2266
2267static const struct net_device_ops ibmvnic_netdev_ops = {
2268 .ndo_open = ibmvnic_open,
2269 .ndo_stop = ibmvnic_close,
2270 .ndo_start_xmit = ibmvnic_xmit,
2271 .ndo_set_rx_mode = ibmvnic_set_multi,
2272 .ndo_set_mac_address = ibmvnic_set_mac,
2273 .ndo_validate_addr = eth_validate_addr,
2274 .ndo_tx_timeout = ibmvnic_tx_timeout,
2275 .ndo_change_mtu = ibmvnic_change_mtu,
2276 .ndo_features_check = ibmvnic_features_check,
2277};

/* ethtool functions */

2281static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2282 struct ethtool_link_ksettings *cmd)
2283{
2284 u32 supported, advertising;
2285
2286 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2287 SUPPORTED_FIBRE);
2288 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2289 ADVERTISED_FIBRE);
2290 cmd->base.speed = SPEED_1000;
2291 cmd->base.duplex = DUPLEX_FULL;
2292 cmd->base.port = PORT_FIBRE;
2293 cmd->base.phy_address = 0;
2294 cmd->base.autoneg = AUTONEG_ENABLE;
2295
2296 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2297 supported);
2298 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2299 advertising);
2300
2301 return 0;
2302}
2303
2304static void ibmvnic_get_drvinfo(struct net_device *netdev,
2305 struct ethtool_drvinfo *info)
2306{
2307 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2308
2309 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2310 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2311 strlcpy(info->fw_version, adapter->fw_version,
2312 sizeof(info->fw_version));
2313}
2314
2315static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2316{
2317 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2318
2319 return adapter->msg_enable;
2320}
2321
2322static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2323{
2324 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2325
2326 adapter->msg_enable = data;
2327}
2328
2329static u32 ibmvnic_get_link(struct net_device *netdev)
2330{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
2337}
2338
2339static void ibmvnic_get_ringparam(struct net_device *netdev,
2340 struct ethtool_ringparam *ring)
2341{
2342 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2343
2344 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2345 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2346 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2347 } else {
2348 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2349 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2350 }
2351 ring->rx_mini_max_pending = 0;
2352 ring->rx_jumbo_max_pending = 0;
2353 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2354 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2355 ring->rx_mini_pending = 0;
2356 ring->rx_jumbo_pending = 0;
2357}
2358
2359static int ibmvnic_set_ringparam(struct net_device *netdev,
2360 struct ethtool_ringparam *ring)
2361{
2362 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2363 int ret;
2364
2365 ret = 0;
2366 adapter->desired.rx_entries = ring->rx_pending;
2367 adapter->desired.tx_entries = ring->tx_pending;
2368
2369 ret = wait_for_reset(adapter);
2370
2371 if (!ret &&
2372 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2373 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2374 netdev_info(netdev,
2375 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2376 ring->rx_pending, ring->tx_pending,
2377 adapter->req_rx_add_entries_per_subcrq,
2378 adapter->req_tx_entries_per_subcrq);
2379 return ret;
2380}
2381
2382static void ibmvnic_get_channels(struct net_device *netdev,
2383 struct ethtool_channels *channels)
2384{
2385 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2386
2387 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2388 channels->max_rx = adapter->max_rx_queues;
2389 channels->max_tx = adapter->max_tx_queues;
2390 } else {
2391 channels->max_rx = IBMVNIC_MAX_QUEUES;
2392 channels->max_tx = IBMVNIC_MAX_QUEUES;
2393 }
2394
2395 channels->max_other = 0;
2396 channels->max_combined = 0;
2397 channels->rx_count = adapter->req_rx_queues;
2398 channels->tx_count = adapter->req_tx_queues;
2399 channels->other_count = 0;
2400 channels->combined_count = 0;
2401}
2402
2403static int ibmvnic_set_channels(struct net_device *netdev,
2404 struct ethtool_channels *channels)
2405{
2406 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2407 int ret;
2408
2409 ret = 0;
2410 adapter->desired.rx_queues = channels->rx_count;
2411 adapter->desired.tx_queues = channels->tx_count;
2412
2413 ret = wait_for_reset(adapter);
2414
2415 if (!ret &&
2416 (adapter->req_rx_queues != channels->rx_count ||
2417 adapter->req_tx_queues != channels->tx_count))
2418 netdev_info(netdev,
2419 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2420 channels->rx_count, channels->tx_count,
2421 adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}
2425
2426static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2427{
2428 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2429 int i;
2430
2431 switch (stringset) {
2432 case ETH_SS_STATS:
2433 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2434 i++, data += ETH_GSTRING_LEN)
2435 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2436
2437 for (i = 0; i < adapter->req_tx_queues; i++) {
2438 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2439 data += ETH_GSTRING_LEN;
2440
2441 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2442 data += ETH_GSTRING_LEN;
2443
2444 snprintf(data, ETH_GSTRING_LEN,
2445 "tx%d_dropped_packets", i);
2446 data += ETH_GSTRING_LEN;
2447 }
2448
2449 for (i = 0; i < adapter->req_rx_queues; i++) {
2450 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2451 data += ETH_GSTRING_LEN;
2452
2453 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2454 data += ETH_GSTRING_LEN;
2455
2456 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2457 data += ETH_GSTRING_LEN;
2458 }
2459 break;
2460
2461 case ETH_SS_PRIV_FLAGS:
2462 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2463 strcpy(data + i * ETH_GSTRING_LEN,
2464 ibmvnic_priv_flags[i]);
2465 break;
2466 default:
2467 return;
2468 }
2469}
2470
2471static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2472{
2473 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2474
2475 switch (sset) {
2476 case ETH_SS_STATS:
2477 return ARRAY_SIZE(ibmvnic_stats) +
2478 adapter->req_tx_queues * NUM_TX_STATS +
2479 adapter->req_rx_queues * NUM_RX_STATS;
2480 case ETH_SS_PRIV_FLAGS:
2481 return ARRAY_SIZE(ibmvnic_priv_flags);
2482 default:
2483 return -EOPNOTSUPP;
2484 }
2485}
2486
2487static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2488 struct ethtool_stats *stats, u64 *data)
2489{
2490 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2491 union ibmvnic_crq crq;
2492 int i, j;
2493 int rc;
2494
2495 memset(&crq, 0, sizeof(crq));
2496 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2497 crq.request_statistics.cmd = REQUEST_STATISTICS;
2498 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2499 crq.request_statistics.len =
		cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for the statistics request to complete */
	init_completion(&adapter->stats_done);
2504 rc = ibmvnic_send_crq(adapter, &crq);
2505 if (rc)
2506 return;
2507 wait_for_completion(&adapter->stats_done);
2508
2509 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2510 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2511 ibmvnic_stats[i].offset));
2512
2513 for (j = 0; j < adapter->req_tx_queues; j++) {
2514 data[i] = adapter->tx_stats_buffers[j].packets;
2515 i++;
2516 data[i] = adapter->tx_stats_buffers[j].bytes;
2517 i++;
2518 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2519 i++;
2520 }
2521
2522 for (j = 0; j < adapter->req_rx_queues; j++) {
2523 data[i] = adapter->rx_stats_buffers[j].packets;
2524 i++;
2525 data[i] = adapter->rx_stats_buffers[j].bytes;
2526 i++;
2527 data[i] = adapter->rx_stats_buffers[j].interrupts;
2528 i++;
2529 }
2530}
2531
2532static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2533{
2534 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2535
2536 return adapter->priv_flags;
2537}
2538
2539static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2540{
2541 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2542 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2543
2544 if (which_maxes)
2545 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2546 else
2547 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2548
2549 return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
2552 .get_drvinfo = ibmvnic_get_drvinfo,
2553 .get_msglevel = ibmvnic_get_msglevel,
2554 .set_msglevel = ibmvnic_set_msglevel,
2555 .get_link = ibmvnic_get_link,
2556 .get_ringparam = ibmvnic_get_ringparam,
2557 .set_ringparam = ibmvnic_set_ringparam,
2558 .get_channels = ibmvnic_get_channels,
2559 .set_channels = ibmvnic_set_channels,
2560 .get_strings = ibmvnic_get_strings,
2561 .get_sset_count = ibmvnic_get_sset_count,
2562 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2563 .get_link_ksettings = ibmvnic_get_link_ksettings,
2564 .get_priv_flags = ibmvnic_get_priv_flags,
2565 .set_priv_flags = ibmvnic_set_priv_flags,
2566};
2567
/* Routines for managing CRQs/sCRQs */

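/* Reset a single sub-CRQ: tear down its IRQ mapping, clear the queue's
 * message page, and re-register the queue with the hypervisor.
 */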
2570static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2571 struct ibmvnic_sub_crq_queue *scrq)
2572{
2573 int rc;
2574
2575 if (scrq->irq) {
2576 free_irq(scrq->irq, scrq);
2577 irq_dispose_mapping(scrq->irq);
2578 scrq->irq = 0;
2579 }
2580
2581 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2582 atomic_set(&scrq->used, 0);
2583 scrq->cur = 0;
2584
2585 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2586 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2587 return rc;
2588}
2589
2590static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2591{
2592 int i, rc;
2593
2594 for (i = 0; i < adapter->req_tx_queues; i++) {
2595 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2596 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2597 if (rc)
2598 return rc;
2599 }
2600
2601 for (i = 0; i < adapter->req_rx_queues; i++) {
2602 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2603 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2604 if (rc)
2605 return rc;
2606 }
2607
2608 return rc;
2609}
2610
2611static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2612 struct ibmvnic_sub_crq_queue *scrq,
2613 bool do_h_free)
2614{
2615 struct device *dev = &adapter->vdev->dev;
2616 long rc;
2617
2618 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2619
2620 if (do_h_free) {
2621
2622 do {
2623 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2624 adapter->vdev->unit_address,
2625 scrq->crq_num);
2626 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2627
2628 if (rc) {
2629 netdev_err(adapter->netdev,
2630 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2631 scrq->crq_num, rc);
2632 }
2633 }
2634
2635 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2636 DMA_BIDIRECTIONAL);
2637 free_pages((unsigned long)scrq->msgs, 2);
2638 kfree(scrq);
2639}
2640
2641static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2642 *adapter)
2643{
2644 struct device *dev = &adapter->vdev->dev;
2645 struct ibmvnic_sub_crq_queue *scrq;
2646 int rc;
2647
2648 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2649 if (!scrq)
2650 return NULL;
2651
2652 scrq->msgs =
2653 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2654 if (!scrq->msgs) {
2655 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2656 goto zero_page_failed;
2657 }
2658
2659 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2660 DMA_BIDIRECTIONAL);
2661 if (dma_mapping_error(dev, scrq->msg_token)) {
2662 dev_warn(dev, "Couldn't map crq queue messages page\n");
2663 goto map_failed;
2664 }
2665
2666 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2667 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2668
2669 if (rc == H_RESOURCE)
2670 rc = ibmvnic_reset_crq(adapter);
2671
2672 if (rc == H_CLOSED) {
2673 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2674 } else if (rc) {
2675 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2676 goto reg_failed;
2677 }
2678
2679 scrq->adapter = adapter;
2680 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2681 spin_lock_init(&scrq->lock);
2682
2683 netdev_dbg(adapter->netdev,
2684 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2685 scrq->crq_num, scrq->hw_irq, scrq->irq);
2686
2687 return scrq;
2688
2689reg_failed:
2690 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2691 DMA_BIDIRECTIONAL);
2692map_failed:
2693 free_pages((unsigned long)scrq->msgs, 2);
2694zero_page_failed:
2695 kfree(scrq);
2696
2697 return NULL;
2698}
2699
2700static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2701{
2702 int i;
2703
2704 if (adapter->tx_scrq) {
2705 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2706 if (!adapter->tx_scrq[i])
2707 continue;
2708
2709 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2710 i);
2711 if (adapter->tx_scrq[i]->irq) {
2712 free_irq(adapter->tx_scrq[i]->irq,
2713 adapter->tx_scrq[i]);
2714 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2715 adapter->tx_scrq[i]->irq = 0;
2716 }
2717
2718 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2719 do_h_free);
2720 }
2721
2722 kfree(adapter->tx_scrq);
2723 adapter->tx_scrq = NULL;
2724 adapter->num_active_tx_scrqs = 0;
2725 }
2726
2727 if (adapter->rx_scrq) {
2728 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2729 if (!adapter->rx_scrq[i])
2730 continue;
2731
2732 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2733 i);
2734 if (adapter->rx_scrq[i]->irq) {
2735 free_irq(adapter->rx_scrq[i]->irq,
2736 adapter->rx_scrq[i]);
2737 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2738 adapter->rx_scrq[i]->irq = 0;
2739 }
2740
2741 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2742 do_h_free);
2743 }
2744
2745 kfree(adapter->rx_scrq);
2746 adapter->rx_scrq = NULL;
2747 adapter->num_active_rx_scrqs = 0;
2748 }
2749}
2750
2751static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2752 struct ibmvnic_sub_crq_queue *scrq)
2753{
2754 struct device *dev = &adapter->vdev->dev;
2755 unsigned long rc;
2756
2757 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2758 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2759 if (rc)
2760 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2761 scrq->hw_irq, rc);
2762 return rc;
2763}
2764
2765static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2766 struct ibmvnic_sub_crq_queue *scrq)
2767{
2768 struct device *dev = &adapter->vdev->dev;
2769 unsigned long rc;
2770
2771 if (scrq->hw_irq > 0x100000000ULL) {
2772 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2773 return 1;
2774 }
2775
2776 if (adapter->resetting &&
2777 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2778 u64 val = (0xff000000) | scrq->hw_irq;
2779
2780 rc = plpar_hcall_norets(H_EOI, val);
2781 if (rc)
2782 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2783 val, rc);
2784 }
2785
2786 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2787 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2788 if (rc)
2789 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2790 scrq->hw_irq, rc);
2791 return rc;
2792}
2793
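/* Process TX completions on one sub-CRQ: unmap indirect descriptors,
 * free completed skbs, return buffers to the TX pool, and wake the
 * subqueue once it drains below half of its entries.
 */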
2794static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2795 struct ibmvnic_sub_crq_queue *scrq)
2796{
2797 struct device *dev = &adapter->vdev->dev;
2798 struct ibmvnic_tx_pool *tx_pool;
2799 struct ibmvnic_tx_buff *txbuff;
2800 union sub_crq *next;
2801 int index;
2802 int i, j;
2803 u8 *first;
2804
2805restart_loop:
2806 while (pending_scrq(adapter, scrq)) {
2807 unsigned int pool = scrq->pool_index;
2808 int num_entries = 0;
2809
2810 next = ibmvnic_next_scrq(adapter, scrq);
2811 for (i = 0; i < next->tx_comp.num_comps; i++) {
2812 if (next->tx_comp.rcs[i]) {
2813 dev_err(dev, "tx error %x\n",
2814 next->tx_comp.rcs[i]);
2815 continue;
2816 }
2817 index = be32_to_cpu(next->tx_comp.correlators[i]);
2818 if (index & IBMVNIC_TSO_POOL_MASK) {
2819 tx_pool = &adapter->tso_pool[pool];
2820 index &= ~IBMVNIC_TSO_POOL_MASK;
2821 } else {
2822 tx_pool = &adapter->tx_pool[pool];
2823 }
2824
2825 txbuff = &tx_pool->tx_buff[index];
2826
2827 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2828 if (!txbuff->data_dma[j])
2829 continue;
2830
2831 txbuff->data_dma[j] = 0;
2832 }
2833
2834 first = &txbuff->indir_arr[0].generic.first;
2835 if (*first == IBMVNIC_CRQ_CMD) {
2836 dma_unmap_single(dev, txbuff->indir_dma,
2837 sizeof(txbuff->indir_arr),
2838 DMA_TO_DEVICE);
2839 *first = 0;
2840 }
2841
2842 if (txbuff->last_frag) {
2843 dev_kfree_skb_any(txbuff->skb);
2844 txbuff->skb = NULL;
2845 }
2846
2847 num_entries += txbuff->num_entries;
2848
2849 tx_pool->free_map[tx_pool->producer_index] = index;
2850 tx_pool->producer_index =
2851 (tx_pool->producer_index + 1) %
2852 tx_pool->num_buffers;
2853 }
2854
2855 next->tx_comp.first = 0;
2856
2857 if (atomic_sub_return(num_entries, &scrq->used) <=
2858 (adapter->req_tx_entries_per_subcrq / 2) &&
2859 __netif_subqueue_stopped(adapter->netdev,
2860 scrq->pool_index)) {
2861 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2862 netdev_dbg(adapter->netdev, "Started queue %d\n",
2863 scrq->pool_index);
2864 }
2865 }
2866
2867 enable_scrq_irq(adapter, scrq);
2868
2869 if (pending_scrq(adapter, scrq)) {
2870 disable_scrq_irq(adapter, scrq);
2871 goto restart_loop;
2872 }
2873
2874 return 0;
2875}
2876
2877static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2878{
2879 struct ibmvnic_sub_crq_queue *scrq = instance;
2880 struct ibmvnic_adapter *adapter = scrq->adapter;
2881
2882 disable_scrq_irq(adapter, scrq);
2883 ibmvnic_complete_tx(adapter, scrq);
2884
2885 return IRQ_HANDLED;
2886}
2887
2888static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2889{
2890 struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;
2898
2899 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2900
2901 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2902 disable_scrq_irq(adapter, scrq);
2903 __napi_schedule(&adapter->napi[scrq->scrq_num]);
2904 }
2905
2906 return IRQ_HANDLED;
2907}
2908
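/* Map and request an IRQ for every TX and RX sub-CRQ.  On failure,
 * free any IRQs already requested and release all sub-CRQs.
 */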
2909static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2910{
2911 struct device *dev = &adapter->vdev->dev;
2912 struct ibmvnic_sub_crq_queue *scrq;
2913 int i = 0, j = 0;
2914 int rc = 0;
2915
2916 for (i = 0; i < adapter->req_tx_queues; i++) {
2917 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2918 i);
2919 scrq = adapter->tx_scrq[i];
2920 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2921
2922 if (!scrq->irq) {
2923 rc = -EINVAL;
2924 dev_err(dev, "Error mapping irq\n");
2925 goto req_tx_irq_failed;
2926 }
2927
2928 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2929 0, "ibmvnic_tx", scrq);
2930
2931 if (rc) {
2932 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2933 scrq->irq, rc);
2934 irq_dispose_mapping(scrq->irq);
2935 goto req_tx_irq_failed;
2936 }
2937 }
2938
2939 for (i = 0; i < adapter->req_rx_queues; i++) {
2940 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2941 i);
2942 scrq = adapter->rx_scrq[i];
2943 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2944 if (!scrq->irq) {
2945 rc = -EINVAL;
2946 dev_err(dev, "Error mapping irq\n");
2947 goto req_rx_irq_failed;
2948 }
2949 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2950 0, "ibmvnic_rx", scrq);
2951 if (rc) {
2952 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2953 scrq->irq, rc);
2954 irq_dispose_mapping(scrq->irq);
2955 goto req_rx_irq_failed;
2956 }
2957 }
2958 return rc;
2959
2960req_rx_irq_failed:
2961 for (j = 0; j < i; j++) {
2962 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2963 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2964 }
2965 i = adapter->req_tx_queues;
2966req_tx_irq_failed:
2967 for (j = 0; j < i; j++) {
2968 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2970 }
2971 release_sub_crqs(adapter, 1);
2972 return rc;
2973}
2974
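/* Allocate one sub-CRQ per requested TX and RX queue.  If fewer queues
 * could be registered than requested, trim the requested RX and TX
 * counts (down to their minimums) until they match what was allocated,
 * then split the pool into the tx_scrq and rx_scrq arrays.
 */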
2975static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2976{
2977 struct device *dev = &adapter->vdev->dev;
2978 struct ibmvnic_sub_crq_queue **allqueues;
2979 int registered_queues = 0;
2980 int total_queues;
2981 int more = 0;
2982 int i;
2983
2984 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2985
2986 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2987 if (!allqueues)
2988 return -1;
2989
2990 for (i = 0; i < total_queues; i++) {
2991 allqueues[i] = init_sub_crq_queue(adapter);
2992 if (!allqueues[i]) {
2993 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2994 break;
2995 }
2996 registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
3002 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3003 goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
3008 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3009 switch (i % 3) {
3010 case 0:
3011 if (adapter->req_rx_queues > adapter->min_rx_queues)
3012 adapter->req_rx_queues--;
3013 else
3014 more++;
3015 break;
3016 case 1:
3017 if (adapter->req_tx_queues > adapter->min_tx_queues)
3018 adapter->req_tx_queues--;
3019 else
3020 more++;
3021 break;
3022 }
3023 }
3024
3025 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3026 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3027 if (!adapter->tx_scrq)
3028 goto tx_failed;
3029
3030 for (i = 0; i < adapter->req_tx_queues; i++) {
3031 adapter->tx_scrq[i] = allqueues[i];
3032 adapter->tx_scrq[i]->pool_index = i;
3033 adapter->num_active_tx_scrqs++;
3034 }
3035
3036 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3037 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3038 if (!adapter->rx_scrq)
3039 goto rx_failed;
3040
3041 for (i = 0; i < adapter->req_rx_queues; i++) {
3042 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3043 adapter->rx_scrq[i]->scrq_num = i;
3044 adapter->num_active_rx_scrqs++;
3045 }
3046
3047 kfree(allqueues);
3048 return 0;
3049
3050rx_failed:
3051 kfree(adapter->tx_scrq);
3052 adapter->tx_scrq = NULL;
3053tx_failed:
3054 for (i = 0; i < registered_queues; i++)
3055 release_sub_crq_queue(adapter, allqueues[i], 1);
3056 kfree(allqueues);
3057 return -1;
3058}
3059
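/* Compute the requested capabilities (queue counts, ring entries, MTU)
 * from the device maximums and any user-desired values, then send one
 * REQUEST_CAPABILITY CRQ per setting.  On retry the previously
 * computed values are resent unchanged.
 */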
3060static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3061{
3062 struct device *dev = &adapter->vdev->dev;
3063 union ibmvnic_crq crq;
3064 int max_entries;
3065
	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3069
3070 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3071 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3072 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3073 return;
3074 }
3075
3076 if (adapter->desired.mtu)
3077 adapter->req_mtu = adapter->desired.mtu;
3078 else
3079 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3080
3081 if (!adapter->desired.tx_entries)
3082 adapter->desired.tx_entries =
3083 adapter->max_tx_entries_per_subcrq;
3084 if (!adapter->desired.rx_entries)
3085 adapter->desired.rx_entries =
3086 adapter->max_rx_add_entries_per_subcrq;
3087
3088 max_entries = IBMVNIC_MAX_LTB_SIZE /
3089 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3090
3091 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3092 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3093 adapter->desired.tx_entries = max_entries;
3094 }
3095
3096 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3097 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3098 adapter->desired.rx_entries = max_entries;
3099 }
3100
3101 if (adapter->desired.tx_entries)
3102 adapter->req_tx_entries_per_subcrq =
3103 adapter->desired.tx_entries;
3104 else
3105 adapter->req_tx_entries_per_subcrq =
3106 adapter->max_tx_entries_per_subcrq;
3107
3108 if (adapter->desired.rx_entries)
3109 adapter->req_rx_add_entries_per_subcrq =
3110 adapter->desired.rx_entries;
3111 else
3112 adapter->req_rx_add_entries_per_subcrq =
3113 adapter->max_rx_add_entries_per_subcrq;
3114
3115 if (adapter->desired.tx_queues)
3116 adapter->req_tx_queues =
3117 adapter->desired.tx_queues;
3118 else
3119 adapter->req_tx_queues =
3120 adapter->opt_tx_comp_sub_queues;
3121
3122 if (adapter->desired.rx_queues)
3123 adapter->req_rx_queues =
3124 adapter->desired.rx_queues;
3125 else
3126 adapter->req_rx_queues =
3127 adapter->opt_rx_comp_queues;
3128
3129 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3130 }
3131
3132 memset(&crq, 0, sizeof(crq));
3133 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3134 crq.request_capability.cmd = REQUEST_CAPABILITY;
3135
3136 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3137 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3138 atomic_inc(&adapter->running_cap_crqs);
3139 ibmvnic_send_crq(adapter, &crq);
3140
3141 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3142 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3143 atomic_inc(&adapter->running_cap_crqs);
3144 ibmvnic_send_crq(adapter, &crq);
3145
3146 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3147 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3148 atomic_inc(&adapter->running_cap_crqs);
3149 ibmvnic_send_crq(adapter, &crq);
3150
3151 crq.request_capability.capability =
3152 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3153 crq.request_capability.number =
3154 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3155 atomic_inc(&adapter->running_cap_crqs);
3156 ibmvnic_send_crq(adapter, &crq);
3157
3158 crq.request_capability.capability =
3159 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3160 crq.request_capability.number =
3161 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3162 atomic_inc(&adapter->running_cap_crqs);
3163 ibmvnic_send_crq(adapter, &crq);
3164
3165 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3166 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3167 atomic_inc(&adapter->running_cap_crqs);
3168 ibmvnic_send_crq(adapter, &crq);
3169
3170 if (adapter->netdev->flags & IFF_PROMISC) {
3171 if (adapter->promisc_supported) {
3172 crq.request_capability.capability =
3173 cpu_to_be16(PROMISC_REQUESTED);
3174 crq.request_capability.number = cpu_to_be64(1);
3175 atomic_inc(&adapter->running_cap_crqs);
3176 ibmvnic_send_crq(adapter, &crq);
3177 }
3178 } else {
3179 crq.request_capability.capability =
3180 cpu_to_be16(PROMISC_REQUESTED);
3181 crq.request_capability.number = cpu_to_be64(0);
3182 atomic_inc(&adapter->running_cap_crqs);
3183 ibmvnic_send_crq(adapter, &crq);
3184 }
3185}
3186
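/* A sub-CRQ entry is valid once the firmware has set the CMD_RSP bit
 * in its first byte; peek at the current slot without consuming it.
 */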
3187static int pending_scrq(struct ibmvnic_adapter *adapter,
3188 struct ibmvnic_sub_crq_queue *scrq)
3189{
3190 union sub_crq *entry = &scrq->msgs[scrq->cur];
3191
3192 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3193 return 1;
3194 else
3195 return 0;
3196}
3197
3198static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3199 struct ibmvnic_sub_crq_queue *scrq)
3200{
3201 union sub_crq *entry;
3202 unsigned long flags;
3203
3204 spin_lock_irqsave(&scrq->lock, flags);
3205 entry = &scrq->msgs[scrq->cur];
3206 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3207 if (++scrq->cur == scrq->size)
3208 scrq->cur = 0;
3209 } else {
3210 entry = NULL;
3211 }
3212 spin_unlock_irqrestore(&scrq->lock, flags);
3213
3214 return entry;
3215}
3216
3217static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3218{
3219 struct ibmvnic_crq_queue *queue = &adapter->crq;
3220 union ibmvnic_crq *crq;
3221
3222 crq = &queue->msgs[queue->cur];
3223 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3224 if (++queue->cur == queue->size)
3225 queue->cur = 0;
3226 } else {
3227 crq = NULL;
3228 }
3229
3230 return crq;
3231}
3232
3233static void print_subcrq_error(struct device *dev, int rc, const char *func)
3234{
3235 switch (rc) {
3236 case H_PARAMETER:
3237 dev_warn_ratelimited(dev,
3238 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3239 func, rc);
3240 break;
3241 case H_CLOSED:
3242 dev_warn_ratelimited(dev,
3243 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3244 func, rc);
3245 break;
3246 default:
3247 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3248 break;
3249 }
3250}
3251
3252static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3253 union sub_crq *sub_crq)
3254{
3255 unsigned int ua = adapter->vdev->unit_address;
3256 struct device *dev = &adapter->vdev->dev;
3257 u64 *u64_crq = (u64 *)sub_crq;
3258 int rc;
3259
3260 netdev_dbg(adapter->netdev,
3261 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3262 (unsigned long int)cpu_to_be64(remote_handle),
3263 (unsigned long int)cpu_to_be64(u64_crq[0]),
3264 (unsigned long int)cpu_to_be64(u64_crq[1]),
3265 (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();
3270
3271 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3272 cpu_to_be64(remote_handle),
3273 cpu_to_be64(u64_crq[0]),
3274 cpu_to_be64(u64_crq[1]),
3275 cpu_to_be64(u64_crq[2]),
3276 cpu_to_be64(u64_crq[3]));
3277
3278 if (rc)
3279 print_subcrq_error(dev, rc, __func__);
3280
3281 return rc;
3282}
3283
3284static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3285 u64 remote_handle, u64 ioba, u64 num_entries)
3286{
3287 unsigned int ua = adapter->vdev->unit_address;
3288 struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
3293 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3294 cpu_to_be64(remote_handle),
3295 ioba, num_entries);
3296
3297 if (rc)
3298 print_subcrq_error(dev, rc, __func__);
3299
3300 return rc;
3301}
3302
3303static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3304 union ibmvnic_crq *crq)
3305{
3306 unsigned int ua = adapter->vdev->unit_address;
3307 struct device *dev = &adapter->vdev->dev;
3308 u64 *u64_crq = (u64 *)crq;
3309 int rc;
3310
3311 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3312 (unsigned long int)cpu_to_be64(u64_crq[0]),
3313 (unsigned long int)cpu_to_be64(u64_crq[1]));
3314
3315 if (!adapter->crq.active &&
3316 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3317 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3318 return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();
3323
3324 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3325 cpu_to_be64(u64_crq[0]),
3326 cpu_to_be64(u64_crq[1]));
3327
3328 if (rc) {
3329 if (rc == H_CLOSED) {
3330 dev_warn(dev, "CRQ Queue closed\n");
3331 if (adapter->resetting)
3332 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3333 }
3334
3335 dev_warn(dev, "Send error (rc=%d)\n", rc);
3336 }
3337
3338 return rc;
3339}
3340
3341static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3342{
3343 union ibmvnic_crq crq;
3344
3345 memset(&crq, 0, sizeof(crq));
3346 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3347 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3348 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3349
3350 return ibmvnic_send_crq(adapter, &crq);
3351}
3352
3353static int send_version_xchg(struct ibmvnic_adapter *adapter)
3354{
3355 union ibmvnic_crq crq;
3356
3357 memset(&crq, 0, sizeof(crq));
3358 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3359 crq.version_exchange.cmd = VERSION_EXCHANGE;
3360 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3361
3362 return ibmvnic_send_crq(adapter, &crq);
3363}
3364
3365struct vnic_login_client_data {
3366 u8 type;
3367 __be16 len;
3368 char name[];
3369} __packed;
3370
3371static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3372{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null to round things out.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
3381 len += strlen(utsname()->nodename) + 1;
3382 len += strlen(adapter->netdev->name) + 1;
3383
3384 return len;
3385}
3386
3387static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3388 struct vnic_login_client_data *vlcd)
3389{
3390 const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
3395 len = strlen(os_name) + 1;
3396 vlcd->len = cpu_to_be16(len);
3397 strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
3402 len = strlen(utsname()->nodename) + 1;
3403 vlcd->len = cpu_to_be16(len);
3404 strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
3409 len = strlen(adapter->netdev->name) + 1;
3410 vlcd->len = cpu_to_be16(len);
3411 strncpy(vlcd->name, adapter->netdev->name, len);
3412}
3413
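/* Build and map the login buffer: queue counts, the sub-CRQ handles
 * for each TX/RX queue, and the client data (OS, LPAR and device
 * names), then send the LOGIN CRQ pointing at it.
 */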
3414static int send_login(struct ibmvnic_adapter *adapter)
3415{
3416 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3417 struct ibmvnic_login_buffer *login_buffer;
3418 struct device *dev = &adapter->vdev->dev;
3419 dma_addr_t rsp_buffer_token;
3420 dma_addr_t buffer_token;
3421 size_t rsp_buffer_size;
3422 union ibmvnic_crq crq;
3423 size_t buffer_size;
3424 __be64 *tx_list_p;
3425 __be64 *rx_list_p;
3426 int client_data_len;
3427 struct vnic_login_client_data *vlcd;
3428 int i;
3429
3430 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3431 netdev_err(adapter->netdev,
3432 "RX or TX queues are not allocated, device login failed\n");
3433 return -1;
3434 }
3435
3436 release_login_rsp_buffer(adapter);
3437 client_data_len = vnic_client_data_len(adapter);
3438
3439 buffer_size =
3440 sizeof(struct ibmvnic_login_buffer) +
3441 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3442 client_data_len;
3443
3444 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3445 if (!login_buffer)
3446 goto buf_alloc_failed;
3447
3448 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3449 DMA_TO_DEVICE);
3450 if (dma_mapping_error(dev, buffer_token)) {
3451 dev_err(dev, "Couldn't map login buffer\n");
3452 goto buf_map_failed;
3453 }
3454
3455 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3456 sizeof(u64) * adapter->req_tx_queues +
3457 sizeof(u64) * adapter->req_rx_queues +
3458 sizeof(u64) * adapter->req_rx_queues +
3459 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3460
3461 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3462 if (!login_rsp_buffer)
3463 goto buf_rsp_alloc_failed;
3464
3465 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3466 rsp_buffer_size, DMA_FROM_DEVICE);
3467 if (dma_mapping_error(dev, rsp_buffer_token)) {
3468 dev_err(dev, "Couldn't map login rsp buffer\n");
3469 goto buf_rsp_map_failed;
3470 }
3471
3472 adapter->login_buf = login_buffer;
3473 adapter->login_buf_token = buffer_token;
3474 adapter->login_buf_sz = buffer_size;
3475 adapter->login_rsp_buf = login_rsp_buffer;
3476 adapter->login_rsp_buf_token = rsp_buffer_token;
3477 adapter->login_rsp_buf_sz = rsp_buffer_size;
3478
3479 login_buffer->len = cpu_to_be32(buffer_size);
3480 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3481 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3482 login_buffer->off_txcomp_subcrqs =
3483 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3484 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3485 login_buffer->off_rxcomp_subcrqs =
3486 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3487 sizeof(u64) * adapter->req_tx_queues);
3488 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3489 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3490
3491 tx_list_p = (__be64 *)((char *)login_buffer +
3492 sizeof(struct ibmvnic_login_buffer));
3493 rx_list_p = (__be64 *)((char *)login_buffer +
3494 sizeof(struct ibmvnic_login_buffer) +
3495 sizeof(u64) * adapter->req_tx_queues);
3496
3497 for (i = 0; i < adapter->req_tx_queues; i++) {
3498 if (adapter->tx_scrq[i]) {
			tx_list_p[i] =
				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
3501 }
3502 }
3503
3504 for (i = 0; i < adapter->req_rx_queues; i++) {
3505 if (adapter->rx_scrq[i]) {
			rx_list_p[i] =
				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
3508 }
	}

	/* Insert vnic client data */
	vlcd = (struct vnic_login_client_data *)
3513 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3514 login_buffer->client_data_offset =
3515 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3516 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3517
3518 vnic_add_client_data(adapter, vlcd);
3519
3520 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3521 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3522 netdev_dbg(adapter->netdev, "%016lx\n",
3523 ((unsigned long int *)(adapter->login_buf))[i]);
3524 }
3525
3526 memset(&crq, 0, sizeof(crq));
3527 crq.login.first = IBMVNIC_CRQ_CMD;
3528 crq.login.cmd = LOGIN;
3529 crq.login.ioba = cpu_to_be32(buffer_token);
3530 crq.login.len = cpu_to_be32(buffer_size);
3531 ibmvnic_send_crq(adapter, &crq);
3532
3533 return 0;
3534
3535buf_rsp_map_failed:
3536 kfree(login_rsp_buffer);
3537buf_rsp_alloc_failed:
3538 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3539buf_map_failed:
3540 kfree(login_buffer);
3541buf_alloc_failed:
3542 return -1;
3543}
3544
3545static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3546 u32 len, u8 map_id)
3547{
3548 union ibmvnic_crq crq;
3549
3550 memset(&crq, 0, sizeof(crq));
3551 crq.request_map.first = IBMVNIC_CRQ_CMD;
3552 crq.request_map.cmd = REQUEST_MAP;
3553 crq.request_map.map_id = map_id;
3554 crq.request_map.ioba = cpu_to_be32(addr);
3555 crq.request_map.len = cpu_to_be32(len);
3556 return ibmvnic_send_crq(adapter, &crq);
3557}
3558
3559static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3560{
3561 union ibmvnic_crq crq;
3562
3563 memset(&crq, 0, sizeof(crq));
3564 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3565 crq.request_unmap.cmd = REQUEST_UNMAP;
3566 crq.request_unmap.map_id = map_id;
3567 return ibmvnic_send_crq(adapter, &crq);
3568}
3569
3570static void send_map_query(struct ibmvnic_adapter *adapter)
3571{
3572 union ibmvnic_crq crq;
3573
3574 memset(&crq, 0, sizeof(crq));
3575 crq.query_map.first = IBMVNIC_CRQ_CMD;
3576 crq.query_map.cmd = QUERY_MAP;
3577 ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
3582{
3583 union ibmvnic_crq crq;
3584
3585 atomic_set(&adapter->running_cap_crqs, 0);
3586 memset(&crq, 0, sizeof(crq));
3587 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3588 crq.query_capability.cmd = QUERY_CAPABILITY;
3589
3590 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3591 atomic_inc(&adapter->running_cap_crqs);
3592 ibmvnic_send_crq(adapter, &crq);
3593
3594 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3595 atomic_inc(&adapter->running_cap_crqs);
3596 ibmvnic_send_crq(adapter, &crq);
3597
3598 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3599 atomic_inc(&adapter->running_cap_crqs);
3600 ibmvnic_send_crq(adapter, &crq);
3601
3602 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3603 atomic_inc(&adapter->running_cap_crqs);
3604 ibmvnic_send_crq(adapter, &crq);
3605
3606 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3607 atomic_inc(&adapter->running_cap_crqs);
3608 ibmvnic_send_crq(adapter, &crq);
3609
3610 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3611 atomic_inc(&adapter->running_cap_crqs);
3612 ibmvnic_send_crq(adapter, &crq);
3613
3614 crq.query_capability.capability =
3615 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3616 atomic_inc(&adapter->running_cap_crqs);
3617 ibmvnic_send_crq(adapter, &crq);
3618
3619 crq.query_capability.capability =
3620 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3621 atomic_inc(&adapter->running_cap_crqs);
3622 ibmvnic_send_crq(adapter, &crq);
3623
3624 crq.query_capability.capability =
3625 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3626 atomic_inc(&adapter->running_cap_crqs);
3627 ibmvnic_send_crq(adapter, &crq);
3628
3629 crq.query_capability.capability =
3630 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3631 atomic_inc(&adapter->running_cap_crqs);
3632 ibmvnic_send_crq(adapter, &crq);
3633
3634 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3635 atomic_inc(&adapter->running_cap_crqs);
3636 ibmvnic_send_crq(adapter, &crq);
3637
3638 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3639 atomic_inc(&adapter->running_cap_crqs);
3640 ibmvnic_send_crq(adapter, &crq);
3641
3642 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3643 atomic_inc(&adapter->running_cap_crqs);
3644 ibmvnic_send_crq(adapter, &crq);
3645
3646 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3647 atomic_inc(&adapter->running_cap_crqs);
3648 ibmvnic_send_crq(adapter, &crq);
3649
3650 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3651 atomic_inc(&adapter->running_cap_crqs);
3652 ibmvnic_send_crq(adapter, &crq);
3653
3654 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3655 atomic_inc(&adapter->running_cap_crqs);
3656 ibmvnic_send_crq(adapter, &crq);
3657
3658 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3659 atomic_inc(&adapter->running_cap_crqs);
3660 ibmvnic_send_crq(adapter, &crq);
3661
3662 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3663 atomic_inc(&adapter->running_cap_crqs);
3664 ibmvnic_send_crq(adapter, &crq);
3665
3666 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3667 atomic_inc(&adapter->running_cap_crqs);
3668 ibmvnic_send_crq(adapter, &crq);
3669
3670 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3671 atomic_inc(&adapter->running_cap_crqs);
3672 ibmvnic_send_crq(adapter, &crq);
3673
3674 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3675 atomic_inc(&adapter->running_cap_crqs);
3676 ibmvnic_send_crq(adapter, &crq);
3677
3678 crq.query_capability.capability =
3679 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3680 atomic_inc(&adapter->running_cap_crqs);
3681 ibmvnic_send_crq(adapter, &crq);
3682
3683 crq.query_capability.capability =
3684 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3685 atomic_inc(&adapter->running_cap_crqs);
3686 ibmvnic_send_crq(adapter, &crq);
3687
3688 crq.query_capability.capability =
3689 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3690 atomic_inc(&adapter->running_cap_crqs);
3691 ibmvnic_send_crq(adapter, &crq);
3692
3693 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3694 atomic_inc(&adapter->running_cap_crqs);
3695 ibmvnic_send_crq(adapter, &crq);
3696}
3697
3698static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3699 struct ibmvnic_adapter *adapter)
3700{
3701 struct device *dev = &adapter->vdev->dev;
3702
3703 if (crq->get_vpd_size_rsp.rc.code) {
3704 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3705 crq->get_vpd_size_rsp.rc.code);
3706 complete(&adapter->fw_done);
3707 return;
3708 }
3709
3710 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3711 complete(&adapter->fw_done);
3712}
3713
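/* Extract the firmware level from the VPD buffer: locate the "RM"
 * keyword, read the length byte that follows, and copy that many
 * characters into adapter->fw_version.
 */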
3714static void handle_vpd_rsp(union ibmvnic_crq *crq,
3715 struct ibmvnic_adapter *adapter)
3716{
3717 struct device *dev = &adapter->vdev->dev;
3718 unsigned char *substr = NULL;
3719 u8 fw_level_len = 0;
3720
3721 memset(adapter->fw_version, 0, 32);
3722
3723 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3724 DMA_FROM_DEVICE);
3725
3726 if (crq->get_vpd_rsp.rc.code) {
3727 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3728 crq->get_vpd_rsp.rc.code);
3729 goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3736 if (!substr) {
3737 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3738 goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3743 fw_level_len = *(substr + 2);
3744 } else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
3746 goto complete;
3747 }
3748
3749
3750 if ((substr + 3 + fw_level_len) <
3751 (adapter->vpd->buff + adapter->vpd->len)) {
3752 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3753 } else {
3754 dev_info(dev, "FW substr extrapolated VPD buff\n");
3755 }
3756
3757complete:
3758 if (adapter->fw_version[0] == '\0')
3759 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3760 complete(&adapter->fw_done);
3761}
3762
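/* Parse the IP offload query response, program the offload control
 * buffer accordingly, and derive the netdev feature flags (checksum
 * offload, TSO) from what the firmware reports.
 */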
3763static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3764{
3765 struct device *dev = &adapter->vdev->dev;
3766 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3767 union ibmvnic_crq crq;
3768 int i;
3769
3770 dma_unmap_single(dev, adapter->ip_offload_tok,
3771 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3772
3773 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3774 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3775 netdev_dbg(adapter->netdev, "%016lx\n",
3776 ((unsigned long int *)(buf))[i]);
3777
3778 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3779 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3780 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3781 buf->tcp_ipv4_chksum);
3782 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3783 buf->tcp_ipv6_chksum);
3784 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3785 buf->udp_ipv4_chksum);
3786 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3787 buf->udp_ipv6_chksum);
3788 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3789 buf->large_tx_ipv4);
3790 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3791 buf->large_tx_ipv6);
3792 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3793 buf->large_rx_ipv4);
3794 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3795 buf->large_rx_ipv6);
3796 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3797 buf->max_ipv4_header_size);
3798 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3799 buf->max_ipv6_header_size);
3800 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3801 buf->max_tcp_header_size);
3802 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3803 buf->max_udp_header_size);
3804 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3805 buf->max_large_tx_size);
3806 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3807 buf->max_large_rx_size);
3808 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3809 buf->ipv6_extension_header);
3810 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3811 buf->tcp_pseudosum_req);
3812 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3813 buf->num_ipv6_ext_headers);
3814 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3815 buf->off_ipv6_ext_headers);
3816
3817 adapter->ip_offload_ctrl_tok =
3818 dma_map_single(dev, &adapter->ip_offload_ctrl,
3819 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3820
3821 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3822 dev_err(dev, "Couldn't map ip offload control buffer\n");
3823 return;
3824 }
3825
3826 adapter->ip_offload_ctrl.len =
3827 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3828 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3829 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3830 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3831 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3832 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3833 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3834 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3835 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3840 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3841
3842 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3843
3844 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3845 adapter->netdev->features |= NETIF_F_IP_CSUM;
3846
3847 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3848 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3849
3850 if ((adapter->netdev->features &
3851 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3852 adapter->netdev->features |= NETIF_F_RXCSUM;
3853
3854 if (buf->large_tx_ipv4)
3855 adapter->netdev->features |= NETIF_F_TSO;
3856 if (buf->large_tx_ipv6)
3857 adapter->netdev->features |= NETIF_F_TSO6;
3858
3859 adapter->netdev->hw_features |= adapter->netdev->features;
3860
3861 memset(&crq, 0, sizeof(crq));
3862 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3863 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3864 crq.control_ip_offload.len =
3865 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3866 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3867 ibmvnic_send_crq(adapter, &crq);
3868}
3869
3870static const char *ibmvnic_fw_err_cause(u16 cause)
3871{
3872 switch (cause) {
3873 case ADAPTER_PROBLEM:
3874 return "adapter problem";
3875 case BUS_PROBLEM:
3876 return "bus problem";
3877 case FW_PROBLEM:
3878 return "firmware problem";
3879 case DD_PROBLEM:
3880 return "device driver problem";
3881 case EEH_RECOVERY:
3882 return "EEH recovery";
3883 case FW_UPDATED:
3884 return "firmware updated";
3885 case LOW_MEMORY:
		return "low memory";
3887 default:
3888 return "unknown";
3889 }
3890}
3891
3892static void handle_error_indication(union ibmvnic_crq *crq,
3893 struct ibmvnic_adapter *adapter)
3894{
3895 struct device *dev = &adapter->vdev->dev;
3896 u16 cause;
3897
3898 cause = be16_to_cpu(crq->error_indication.error_cause);
3899
3900 dev_warn_ratelimited(dev,
3901 "Firmware reports %serror, cause: %s. Starting recovery...\n",
3902 crq->error_indication.flags
3903 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3904 ibmvnic_fw_err_cause(cause));
3905
3906 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3907 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3908 else
3909 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3910}
3911
3912static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3913 struct ibmvnic_adapter *adapter)
3914{
3915 struct net_device *netdev = adapter->netdev;
3916 struct device *dev = &adapter->vdev->dev;
3917 long rc;
3918
3919 rc = crq->change_mac_addr_rsp.rc.code;
3920 if (rc) {
3921 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3922 goto out;
3923 }
3924 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3925 ETH_ALEN);
3926out:
3927 complete(&adapter->fw_done);
3928 return rc;
3929}
3930
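/* Handle a REQUEST_CAPABILITY response.  On PARTIALSUCCESS the server
 * proposes a lower value, so adopt it (or fall back to the previous
 * MTU) and resend the capability requests.  Once every response has
 * arrived, move on to querying IP offload support.
 */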
3931static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3932 struct ibmvnic_adapter *adapter)
3933{
3934 struct device *dev = &adapter->vdev->dev;
3935 u64 *req_value;
3936 char *name;
3937
3938 atomic_dec(&adapter->running_cap_crqs);
3939 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3940 case REQ_TX_QUEUES:
3941 req_value = &adapter->req_tx_queues;
3942 name = "tx";
3943 break;
3944 case REQ_RX_QUEUES:
3945 req_value = &adapter->req_rx_queues;
3946 name = "rx";
3947 break;
3948 case REQ_RX_ADD_QUEUES:
3949 req_value = &adapter->req_rx_add_queues;
3950 name = "rx_add";
3951 break;
3952 case REQ_TX_ENTRIES_PER_SUBCRQ:
3953 req_value = &adapter->req_tx_entries_per_subcrq;
3954 name = "tx_entries_per_subcrq";
3955 break;
3956 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3957 req_value = &adapter->req_rx_add_entries_per_subcrq;
3958 name = "rx_add_entries_per_subcrq";
3959 break;
3960 case REQ_MTU:
3961 req_value = &adapter->req_mtu;
3962 name = "mtu";
3963 break;
3964 case PROMISC_REQUESTED:
3965 req_value = &adapter->promisc;
3966 name = "promisc";
3967 break;
3968 default:
3969 dev_err(dev, "Got invalid cap request rsp %d\n",
3970 crq->request_capability.capability);
3971 return;
3972 }
3973
3974 switch (crq->request_capability_rsp.rc.code) {
3975 case SUCCESS:
3976 break;
3977 case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.number),
			 name);
3982
3983 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3984 REQ_MTU) {
3985 pr_err("mtu of %llu is not supported. Reverting.\n",
3986 *req_value);
3987 *req_value = adapter->fallback.mtu;
3988 } else {
3989 *req_value =
3990 be64_to_cpu(crq->request_capability_rsp.number);
3991 }
3992
3993 ibmvnic_send_req_caps(adapter, 1);
3994 return;
3995 default:
3996 dev_err(dev, "Error %d in request cap rsp\n",
3997 crq->request_capability_rsp.rc.code);
3998 return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4003 union ibmvnic_crq newcrq;
4004 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4005 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4006 &adapter->ip_offload_buf;
4007
4008 adapter->wait_capability = false;
4009 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4010 buf_sz,
4011 DMA_FROM_DEVICE);
4012
4013 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4014 if (!firmware_has_feature(FW_FEATURE_CMO))
4015 dev_err(dev, "Couldn't map offload buffer\n");
4016 return;
4017 }
4018
4019 memset(&newcrq, 0, sizeof(newcrq));
4020 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4021 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4022 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4023 newcrq.query_ip_offload.ioba =
4024 cpu_to_be32(adapter->ip_offload_tok);
4025
4026 ibmvnic_send_crq(adapter, &newcrq);
4027 }
4028}
4029
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

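/* Record one QUERY_CAPABILITY response; when the last outstanding query
 * has been answered, send the driver's capability requests.
 */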
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

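/* Dispatch one CRQ message to its handler. Called from the CRQ tasklet
 * with the queue lock held, so handlers must not sleep.
 */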
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

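/* Drain the CRQ: pull off each valid message and hand it to
 * ibmvnic_handle_crq() under the queue lock.
 */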
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

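/* Ask the hypervisor to re-enable the CRQ after a transport event */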
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

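/* Allocate a page of CRQ messages, map it for DMA, register the queue
 * with the hypervisor, and wire up the CRQ interrupt and tasklet.
 */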
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one crq? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try once more */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

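/* Reset-time variant of ibmvnic_init(): renegotiates with firmware and
 * rebuilds the sub-CRQs only if the requested queue counts changed,
 * otherwise the existing queues are simply reset.
 */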
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

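/* Writing "1" to the "failover" sysfs attribute triggers a client-side
 * failover: fetch the session token with H_GET_SESSION_TOKEN, then hand
 * it to the hypervisor via H_SESSION_ERR_DETECTED.
 */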
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

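/* Estimate the IO entitlement needed by the device: the CRQ page, the
 * statistics buffer, four pages per sub-CRQ, and the rx buffer pools.
 */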
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);