1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/* Traffic-scheduler class definitions for the Chelsio cxgb4 driver. */
#ifndef __CXGB4_SCHED_H
#define __CXGB4_SCHED_H

#include <linux/spinlock.h>
#include <linux/atomic.h>

/* Driver-level sentinel meaning "no scheduling class" (fits in a u8 id). */
#define SCHED_CLS_NONE 0xff

/* Firmware-level sentinel for an unset scheduling class (32-bit field). */
#define FW_SCHED_CLS_NONE 0xffffffff


/* Upper bound on a class rate, in kbit/s (100,000,000 kbps = 100 Gbps). */
#define SCHED_MAX_RATE_KBPS 100000000U
47
/* Lifecycle state of one scheduler-class slot in the per-port table. */
enum {
	SCHED_STATE_ACTIVE,	/* slot holds a configured, in-use class */
	SCHED_STATE_UNUSED,	/* slot is free for allocation */
};
52
/* Operation selector for firmware scheduler-class requests. */
enum sched_fw_ops {
	SCHED_FW_OP_ADD,	/* create/program a class in firmware */
	SCHED_FW_OP_DEL,	/* remove a class from firmware */
};
57
/* Kind of entity being bound to a scheduling class. */
enum sched_bind_type {
	SCHED_QUEUE,	/* a TX queue (struct ch_sched_queue) */
	SCHED_FLOWC,	/* a flow-control entity (struct ch_sched_flowc) */
};
62
/* One queue bound to a class; linked into sched_class::entry_list. */
struct sched_queue_entry {
	struct list_head list;		/* link in the class's entry_list */
	unsigned int cntxt_id;		/* hardware queue context id */
	struct ch_sched_queue param;	/* binding parameters from caller */
};
68
/* One flowc entity bound to a class; linked into sched_class::entry_list. */
struct sched_flowc_entry {
	struct list_head list;		/* link in the class's entry_list */
	struct ch_sched_flowc param;	/* binding parameters from caller */
};
73
/* One scheduling class (a slot in the per-port sched_table). */
struct sched_class {
	u8 state;			/* SCHED_STATE_ACTIVE or SCHED_STATE_UNUSED */
	u8 idx;				/* index of this class within the table */
	struct ch_sched_params info;	/* class parameters (rate, mode, ...) */
	enum sched_bind_type bind_type;	/* what entry_list holds: queues or flowcs */
	struct list_head entry_list;	/* bound sched_queue/flowc entries */
	atomic_t refcnt;		/* number of active bindings to this class */
};
82
/* Per-port table of scheduling classes, sized at allocation time. */
struct sched_table {      /* per port scheduling table */
	u8 sched_size;			/* number of entries in tab[] */
	struct sched_class tab[];	/* flexible array of class slots */
};
87
88static inline bool can_sched(struct net_device *dev)
89{
90 struct port_info *pi = netdev2pinfo(dev);
91
92 return !pi->sched_tbl ? false : true;
93}
94
95static inline bool valid_class_id(struct net_device *dev, u8 class_id)
96{
97 struct port_info *pi = netdev2pinfo(dev);
98
99 if ((class_id > pi->sched_tbl->sched_size - 1) &&
100 (class_id != SCHED_CLS_NONE))
101 return false;
102
103 return true;
104}
105
/* Find the class a queue matching @p is bound to; presumably returns NULL
 * when no binding exists — verify against the implementation in sched.c.
 */
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p);
/* Bind @arg (ch_sched_queue or ch_sched_flowc, selected by @type) to a
 * scheduling class; returns 0 or a negative errno (kernel convention).
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type);
/* Undo a previous bind of @arg (interpreted per @type). */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type);

/* Allocate (or reuse — confirm in sched.c) a class matching params @p;
 * NULL on failure.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p);
/* Release the class identified by @classid on this port. */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);

/* Allocate a sched_table with @size class slots; NULL on failure. */
struct sched_table *t4_init_sched(unsigned int size);
/* Tear down all per-port scheduling state for @adap. */
void t4_cleanup_sched(struct adapter *adap);
119#endif
120