/*! \file octeon_network.h
 *  \brief Host NIC driver: structure and macro definitions used by the
 *  LiquidIO network module.
 */
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU

/* Bit flag values kept in lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS             0x01
#define LIO_IFSTATE_REGISTERED           0x02
#define LIO_IFSTATE_RUNNING              0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define LIO_IFSTATE_RESETTING            0x10

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

#define LIO_IFCFG_WAIT_TIME 3000 /* milliseconds */
#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200

/* A node in the list of gather components maintained by the NIC driver
 * for each network device.
 */
struct octnic_gather {
	/* List manipulation: next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg, in bytes. */
	int sg_size;

	/* Number of bytes by which sg was adjusted for 8-byte alignment. */
	int adjust;

	/* Gather component that can accommodate the largest fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_vf_stats_resp {
	u64 rh;
	u64 spoofmac_cnt;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};

struct oct_nic_seapi_resp {
	u64 rh;
	union {
		u32 fec_setting;
		u32 speed;
	};
	u64 status;
};

/* LiquidIO per-interface network private data */
struct lio {
	/* State of the interface; Rx/Tx happen only in the RUNNING state. */
	atomic_t ifstate;

	/* Octeon interface index number; the device is represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/* Octeon input queue used to transmit for this interface. */
	int txq;

	/* Octeon output queue from which packets arrive for this
	 * interface.
	 */
	int rxq;

	/* Guards each glist */
	spinlock_t *glist_lock;

	/* Array of gather component linked lists */
	struct list_head *glist;
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;
	u32 glist_entry_size;

	/* NIC properties of the Octeon device this interface is
	 * associated with.
	 */
	struct octdev_props *octprops;

	/* The Octeon device this interface belongs to. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/* Link information sent by the core application for this
	 * interface.
	 */
	struct oct_link_info linfo;

	/* Counter of link changes */
	u64 link_changes;

	/* Size of the Tx queue for this Octeon device. */
	u32 tx_qsize;

	/* Size of the Rx queue for this Octeon device. */
	u32 rx_qsize;

	/* MTU of this Octeon device. */
	u32 mtu;

	/* Per-interface message-level (msglvl) flag. */
	u32 msg_enable;

	/* Copy of interface capabilities: TSO, TSO6, LRO, checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, checksums for this device.
	 */
	u64 enc_dev_capability;

	/* Copy of the beacon register in the PHY. */
	u32 phy_beacon_val;

	/* Copy of the LED control register in the PHY. */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* For atomic access to the Octeon PTP register and data
	 * structure.
	 */
	spinlock_t ptp_lock;

	/* Interface open/closed state. */
	u32 intf_open;

	/* Work queue for Tx queue status. */
	struct cavium_wq txq_status_wq;

	/* Work queues for Rx queue out-of-memory status. */
	struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];

	/* Work queue for link status. */
	struct cavium_wq link_status_wq;

	/* Work queue that periodically sends local time to the Octeon
	 * firmware.
	 */
	struct cavium_wq sync_octeon_time_wq;

	int netdev_uc_count;
	struct cavium_wk stats_wk;
};

#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
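
/* Usage sketch (hypothetical caller, not part of this header): GET_LIO()
 * recovers the LiquidIO private area that was sized into the net_device
 * at allocation time, e.g.:
 *
 *	struct net_device *netdev = alloc_etherdev_mq(LIO_SIZE, num_queues);
 *	struct lio *lio = GET_LIO(netdev);
 *
 *	lio->netdev = netdev;
 */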

#define LIO_MAX_CORES 16

/* Enable or disable a device feature.
 * @netdev: network device
 * @cmd:    command that requires only an acknowledgment
 * @param1: parameter for the command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/* Link control command completion callback.
 * @nctrl_ptr: pointer to the control packet structure
 *
 * Called when a control packet sent to the core application completes.
 * nctrl_ptr holds a copy of the command type and data that were sent.
 * Invoked only if the control packet was sent successfully.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
				       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

void lio_fetch_stats(struct work_struct *work);

int lio_wait_for_clean_oq(struct octeon_device *oct);

/* Register ethtool operations.
 * @netdev: network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
int liquidio_get_fec(struct lio *lio);
int liquidio_set_fec(struct lio *lio, int on_off);

/* Net device change_mtu handler.
 * @netdev:  network device
 * @new_mtu: requested MTU
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2
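
/* Usage sketch (hypothetical, assuming a firmware response whose status
 * field carries one of the two codes above): a completion path might
 * translate them like this:
 *
 *	if (resp->status == LIO_CHANGE_MTU_SUCCESS)
 *		netdev->mtu = new_mtu;
 *	else
 *		dev_err(&oct->pci_dev->dev, "MTU change failed\n");
 */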

/* Rx skb data is aligned to a 64-byte boundary. */
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ      (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE    256 /* Minimum skb allocation for an Rx packet. */
#define LIO_RXBUFFER_SZ 2048 /* Half of a 4 KB page; see recv_buffer_recycle(). */

static inline void *recv_buffer_alloc(struct octeon_device *oct,
				      struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	/* Align skb data to a 64-byte boundary. */
	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
	/* Map the page for DMA from the device. */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed: release both the page and the skb. */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any(skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
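
/* Usage sketch (hypothetical refill loop, not the driver's actual code):
 * an Rx-queue refill path would allocate one buffer per empty descriptor
 * and record the page info alongside it, e.g.:
 *
 *	struct octeon_skb_page_info pg_info;
 *	void *buf = recv_buffer_alloc(oct, &pg_info);
 *
 *	if (!buf)
 *		break;		// out of memory, retry refill later
 *	droq->recv_buf_list[idx].buffer = buf;
 *	droq->recv_buf_list[idx].pg_info = pg_info;
 */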

static inline void *recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	/* Align skb data to a 64-byte boundary. */
	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	/* No page is attached; the skb's own data area is used directly. */
	skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}

static inline int recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	/* The page cannot be recycled if someone else still holds a
	 * reference to it or if it sits on a remote NUMA node.
	 */
	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to the other half of the page. */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
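
/* Usage sketch (hypothetical, simplified from a typical refill path): a
 * caller would try to recycle first and fall back to a fresh allocation:
 *
 *	if (recv_buffer_recycle(oct, &pg_info) == 0)
 *		buf = recv_buffer_reuse(oct, &pg_info);
 *	else
 *		buf = recv_buffer_alloc(oct, &pg_info);
 */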

static inline void *recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		return NULL;
	}

	/* Align skb data to a 64-byte boundary. */
	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	/* Attach the recycled page to the new skb. */
	skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}

/* Release the page attached to an Rx buffer and free the skb, if any. */
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}

/* Free the skb, dropping the page reference if a page is attached. */
static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = (struct octeon_skb_page_info *)(skb->cb);

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any(skb);
}

static inline void recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
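
/* Usage sketch (hypothetical): the wrappers pair a coherent allocation
 * with the Octeon device's PCI function, e.g. for a device-shared ring:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = lio_dma_alloc(oct, ring_size, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	lio_dma_free(oct, ring_size, ring, ring_dma);
 */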

/* Return the CPU address of the receive buffer data (page + offset). */
static inline void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = (struct octeon_skb_page_info *)(skb->cb);
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}

/* Return the DMA address the device should use for this Rx buffer. */
static inline u64 lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;
	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = (struct octeon_skb_page_info *)(skb->cb);
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* The page was mapped at allocation time; only add the offset. */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
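
/* Usage sketch (hypothetical descriptor refill, names assumed): the DMA
 * address goes into the output-queue descriptor that the NIC reads:
 *
 *	droq->desc_ring[idx].buffer_ptr = lio_map_ring(buf);
 */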

static inline void lio_unmap_ring(struct pci_dev *pci_dev, u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev, buf_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
}

static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}

/* Copy received data into the skb and advance its tail. */
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					   struct sk_buff *nicbuf,
					   int copy_len,
					   int idx)
{
	skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
		     copy_len);
}

/* Check interface state.
 * @lio:        per-network private data
 * @state_flag: flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/* Set interface state.
 * @lio:        per-network private data
 * @state_flag: flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/* Clear interface state.
 * @lio:        per-network private data
 * @state_flag: flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
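
/* Usage sketch (hypothetical open/stop paths): the LIO_IFSTATE_* flags
 * gate the hot paths, e.g.:
 *
 *	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *	...
 *	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		return;		// interface is down, drop the work
 *	...
 *	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 */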

/* Wait for all pending requests to complete.
 * @oct: pointer to the Octeon device
 *
 * Called during the shutdown sequence. Polls up to
 * MAX_IO_PENDING_PKT_COUNT times, sleeping 100 ms between polls.
 * Returns 1 if requests are still pending, 0 otherwise.
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
			&oct->response_list[OCTEON_ORDERED_SC_LIST]
			.pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
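
/* Usage sketch (hypothetical teardown path): callers typically just log
 * when the drain times out and proceed with the reset anyway:
 *
 *	if (wait_for_pending_requests(oct))
 *		dev_err(&oct->pci_dev->dev, "requests still pending\n");
 */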

/* Stop all Tx queues.
 * @netdev: network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
	int i;

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
}

/* Wake all Tx queues, counting a restart for each queue that was stopped.
 * @netdev: network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i, qno;

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

		if (__netif_subqueue_stopped(netdev, i)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, i);
		}
	}
}

/* Start all Tx queues, but only if the link is up.
 * @netdev: network device
 */
static inline void start_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i;

	if (lio->linfo.link.s.link_up) {
		for (i = 0; i < netdev->real_num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	}
}

/* Map an skb's queue mapping onto one of the device's instruction queues. */
static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
	return skb->queue_mapping % oct->num_iqs;
}
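
/* Usage sketch (hypothetical xmit path): the transmit handler would pick
 * the instruction queue for an outgoing skb like this:
 *
 *	int q_idx = skb_iq(lio->oct_dev, skb);
 *	u32 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 */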

/* Remove the node at the head of the list and return it, or return NULL
 * if the list is empty.
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if (list_empty_careful(root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
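
/* Usage sketch (hypothetical, mirroring how the gather lists are used):
 * pop a free gather component off a per-queue list under its lock:
 *
 *	spin_lock(&lio->glist_lock[q]);
 *	node = lio_list_delete_head(&lio->glist[q]);
 *	spin_unlock(&lio->glist_lock[q]);
 *	g = node ? list_entry(node, struct octnic_gather, list) : NULL;
 */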

#endif