/*
 * NOTE(review): the original license/copyright header that occupied these
 * lines was lost (reduced to bare line numbers during extraction).
 * Restore the upstream Chelsio dual BSD/GPL license block from the
 * original source before committing.
 */
35#ifndef __CXGB4_SCHED_H
36#define __CXGB4_SCHED_H
37
38#include <linux/spinlock.h>
39#include <linux/atomic.h>
40
/* Driver-side sentinel: no scheduling class bound (8-bit class id space). */
#define SCHED_CLS_NONE 0xff

/* Firmware-side sentinel for "no class" (32-bit) — presumably what the FW
 * interface expects in place of a valid class id; confirm against sched.c.
 */
#define FW_SCHED_CLS_NONE 0xffffffff


/* Maximum class rate: 10000000 kbps == 10 Gbps. */
#define SCHED_MAX_RATE_KBPS 10000000U
47
/* Lifecycle state of a sched_class entry. Values are implicit
 * (SCHED_STATE_ACTIVE == 0) — do not reorder without auditing users.
 */
enum {
	SCHED_STATE_ACTIVE,
	SCHED_STATE_UNUSED,
};
52
/* Operations issued to firmware for a scheduling class. */
enum sched_fw_ops {
	SCHED_FW_OP_ADD,	/* create a class in firmware */
};
56
/* Kind of object being (un)bound to a scheduling class; selects how the
 * void *arg of cxgb4_sched_class_bind/unbind is interpreted.
 */
enum sched_bind_type {
	SCHED_QUEUE,	/* arg is a struct ch_sched_queue */
};
60
/* One Tx queue bound to a scheduling class; linked into
 * sched_class::queue_list via @list.
 */
struct sched_queue_entry {
	struct list_head list;		/* node in sched_class::queue_list */
	unsigned int cntxt_id;		/* queue context id — presumably the
					 * HW/FW queue context; TODO confirm */
	struct ch_sched_queue param;	/* binding parameters from the caller */
};
66
/* One scheduling (traffic) class and the queues bound to it. */
struct sched_class {
	u8 state;			/* SCHED_STATE_ACTIVE or _UNUSED */
	u8 idx;				/* index of this entry in sched_table::tab */
	struct ch_sched_params info;	/* parameters the class was created with */
	struct list_head queue_list;	/* bound sched_queue_entry objects */
	spinlock_t lock;		/* NOTE(review): presumably protects
					 * queue_list — confirm in sched.c */
	atomic_t refcnt;		/* active users of this class */
};
75
76struct sched_table {
77 u8 sched_size;
78 rwlock_t rw_lock;
79 struct sched_class tab[0];
80};
81
82static inline bool can_sched(struct net_device *dev)
83{
84 struct port_info *pi = netdev2pinfo(dev);
85
86 return !pi->sched_tbl ? false : true;
87}
88
89static inline bool valid_class_id(struct net_device *dev, u8 class_id)
90{
91 struct port_info *pi = netdev2pinfo(dev);
92
93 if ((class_id > pi->sched_tbl->sched_size - 1) &&
94 (class_id != SCHED_CLS_NONE))
95 return false;
96
97 return true;
98}
99
/* Bind/unbind @arg to/from a scheduling class; @arg is interpreted per
 * @type (SCHED_QUEUE => struct ch_sched_queue). Presumably return 0 on
 * success or a negative errno — confirm against the sched.c definitions.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type);

/* Obtain a scheduling class for parameters @p; NULL on failure. */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p);

/* Allocate a scheduler table with @size classes / tear down all port
 * scheduler state for @adap.
 */
struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap);
110#endif
111