/* IBM Power Virtual Ethernet Device Driver */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

/* The table is file-local, so make it static to avoid a missing-prototype
 * warning.
 */
static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

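/* Receive queue entry accessors. Each entry's flags_off word carries a
 * toggle bit, a valid bit, a checksum-good bit and the frame offset;
 * values arrive from the hypervisor in big-endian byte order.
 */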
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

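/* Set up the initial, compile-time configuration of a receive buffer pool;
 * a pool is replenished once it drops below 7/8 of its size.
 */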
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

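/* Allocate the free map, DMA address array and skb array for a buffer
 * pool. Called at open time and when a pool is activated via sysfs.
 */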
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

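/* Refill a buffer pool: allocate an skb for each missing buffer, DMA-map
 * it, tag it with a correlator (pool index in the upper 32 bits, buffer
 * index in the lower 32) and hand it to the hypervisor with
 * h_add_logical_lan_buffer().
 */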
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

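/* The last 8 bytes of the buffer list page hold a count of frames the
 * hypervisor dropped because no receive buffer large enough was available.
 */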
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

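/* Empty and free a buffer pool; also used for cleanup in error paths. */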
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

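/* Remove a buffer identified by its correlator from its pool: unmap it,
 * clear its skb slot and return the index to the free map.
 */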
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

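/* Get the skb associated with the current receive queue entry. */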
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

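/* Recycle the current receive queue entry by handing its buffer back to
 * the hypervisor. Returns 1 on success and 0 if the buffer had to be
 * removed from its pool instead.
 */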
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

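/* Harvest the current receive queue entry without recycling its buffer. */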
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

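/* Register the buffer list, filter list and receive queue with the
 * hypervisor.
 */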
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
		union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter may still be registered with the
	 * hypervisor, so the first attempt can fail. In that case free
	 * the logical LAN and try the registration once more, but only
	 * once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

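/* Pack a 6-byte MAC address into the u64 format the hypervisor expects. */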
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

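/* Open the device: allocate the buffer and filter list pages and the
 * receive queue, register them with the hypervisor, activate the buffer
 * pools and request the interrupt.
 */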
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

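/* Close the device and free everything registered at open time. */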
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* The firmware interface has no separate tx/rx checksum offload
	 * enable, so if rx checksum is disabled we must also disable tx
	 * checksum offload: we can no longer hand the hypervisor buffers
	 * that are not properly checksummed.
	 */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* The old large send method does not support IPv6/TCP6. */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/* The hypervisor can return H_BUSY while it is momentarily out of
	 * resources, so retry the send a bounded number of times before
	 * reporting a failure.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

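/* Transmit path: small linear skbs are copied through the bounce buffer
 * that was mapped at open time; everything else is described by up to six
 * DMA descriptors.
 */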
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/* veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/* If a linear packet is below the rx threshold then copy it into
	 * the static bounce buffer. This avoids the cost of a TCE insert
	 * and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;

	/* descs[0] was mapped with dma_map_single(), so it must be unmapped
	 * with dma_unmap_single(); only the frag descriptors were mapped
	 * with skb_frag_dma_map().
	 */
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

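/* NAPI poll routine: drain the receive queue, replenish the buffer pools
 * and re-enable the virtual interrupt once the queue is empty.
 */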
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb();
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded
					 * and if the packet is large send, the
					 * checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;

		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}

		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

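/* Changing the MTU may require switching to a larger buffer pool, which
 * means closing and re-opening a running device.
 */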
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}

	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	/* Some firmware reports an 8-byte MAC address property with two
	 * bytes of padding in front of the real address; skip the padding.
	 */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If firmware supports the new large send method, enable TSO by
	 * default; otherwise only advertise TSO so it can be turned on via
	 * ethtool using the old method.
	 */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
			container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;

			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate buffers */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);