1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xilinx_axienet.h"
19#include "xilinx_tsn_shaper.h"
20
21static inline int axienet_map_gs_to_hw(struct axienet_local *lp, u32 gs)
22{
23 u8 be_queue = 0;
24 u8 re_queue = 1;
25 u8 st_queue = 2;
26 unsigned int acl_bit_map = 0;
27
28 if (lp->num_tc == 2)
29 st_queue = 1;
30
31 if (gs & GS_BE_OPEN)
32 acl_bit_map |= (1 << be_queue);
33 if (gs & GS_ST_OPEN)
34 acl_bit_map |= (1 << st_queue);
35 if (lp->num_tc == 3 && (gs & GS_RE_OPEN))
36 acl_bit_map |= (1 << re_queue);
37
38 return acl_bit_map;
39}
40
/* Program the Qbv admin schedule (gate control list, cycle time, base
 * time) for the port named in @qbv, then latch it with a config-change
 * request.  A cycle_time of 0 means "disable the gate".
 *
 * Returns 0 on success, -EALREADY if a schedule is already pending on
 * the port and @qbv->force is not set.
 */
static int __axienet_set_schedule(struct net_device *ndev, struct qbv_info *qbv)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u16 i;
	unsigned int acl_bit_map = 0;
	u32 u_config_change = 0;
	u8 port = qbv->port;

	if (qbv->cycle_time == 0) {
		/* Disable: clear the gate enable bit... */
		u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
		/* ...and leave all gates open.
		 * NOTE(review): OR-ing a *_SHIFT constant (rather than a
		 * mask shifted by it) looks suspicious — confirm this is
		 * the intended all-open gate-state value.
		 */
		u_config_change |= CC_ADMIN_GATE_STATE_SHIFT;

		axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);

		return 0;
	}

	/* Bit 0 of the port status indicates a config change is still
	 * pending; only proceed over it when the caller forces it.
	 */
	if (axienet_ior(lp, PORT_STATUS(port)) & 1) {
		if (qbv->force) {
			u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
			axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
		} else {
			return -EALREADY;
		}
	}

	/* Cycle time, then the 48-bit PTP base time split across three
	 * registers (ns, low 32 bits of seconds, upper seconds).
	 */
	axienet_iow(lp, ADMIN_CYCLE_TIME_DENOMINATOR(port),
		    qbv->cycle_time & CYCLE_TIME_DENOMINATOR_MASK);

	axienet_iow(lp, ADMIN_BASE_TIME_NS(port), qbv->ptp_time_ns);

	axienet_iow(lp, ADMIN_BASE_TIME_SEC(port),
		    qbv->ptp_time_sec & 0xFFFFFFFF);
	axienet_iow(lp, ADMIN_BASE_TIME_SECS(port),
		    (qbv->ptp_time_sec >> 32) & BASE_TIME_SECS_MASK);

	/* Update only the list-length field of the config register. */
	u_config_change = axienet_ior(lp, CONFIG_CHANGE(port));

	u_config_change &= ~(CC_ADMIN_CTRL_LIST_LENGTH_MASK <<
				CC_ADMIN_CTRL_LIST_LENGTH_SHIFT);
	u_config_change |= (qbv->list_length & CC_ADMIN_CTRL_LIST_LENGTH_MASK)
				<< CC_ADMIN_CTRL_LIST_LENGTH_SHIFT;

	/* Write each gate-control-list entry: gate bitmap + interval.
	 * NOTE(review): qbv->list_length comes from userspace and is not
	 * validated against the size of acl_gate_state[]/acl_gate_time[]
	 * here — confirm the caller (or the struct definition) bounds it.
	 */
	for (i = 0; i < qbv->list_length; i++) {
		acl_bit_map = axienet_map_gs_to_hw(lp, qbv->acl_gate_state[i]);
		axienet_iow(lp, ADMIN_CTRL_LIST(port, i),
			    (acl_bit_map & (ACL_GATE_STATE_MASK)) <<
			    ACL_GATE_STATE_SHIFT);

		axienet_iow(lp, ADMIN_CTRL_LIST_TIME(port, i),
			    qbv->acl_gate_time[i] & CTRL_LIST_TIME_INTERVAL_MASK);
	}

	/* Clear any stale interrupt status before arming the change. */
	axienet_iow(lp, INT_STATUS(port), 0);

	/* Request the config change and (re)enable the gate. */
	u_config_change |= CC_ADMIN_CONFIG_CHANGE_BIT;

	u_config_change |= CC_ADMIN_GATE_ENABLE_BIT;

	axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);

	return 0;
}
112
113int axienet_set_schedule(struct net_device *ndev, void __user *useraddr)
114{
115 struct qbv_info *config;
116 int ret;
117
118 config = kmalloc(sizeof(*config), GFP_KERNEL);
119 if (!config)
120 return -ENOMEM;
121
122 if (copy_from_user(config, useraddr, sizeof(struct qbv_info))) {
123 ret = -EFAULT;
124 goto out;
125 }
126
127 pr_debug("setting new schedule\n");
128
129 ret = __axienet_set_schedule(ndev, config);
130out:
131 kfree(config);
132 return ret;
133}
134
/* Read back the *operational* Qbv schedule of the port named in @qbv
 * and fill the struct in place.  If the gate is not enabled, only
 * cycle_time is set (to 0) to signal "no schedule".
 *
 * Always returns 0.
 */
static int __axienet_get_schedule(struct net_device *ndev, struct qbv_info *qbv)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u16 i = 0;
	u32 u_value = 0;
	u8 port = qbv->port;

	if (!(axienet_ior(lp, CONFIG_CHANGE(port)) &
			CC_ADMIN_GATE_ENABLE_BIT)) {
		/* Gate disabled: report an empty schedule. */
		qbv->cycle_time = 0;
		return 0;
	}

	/* The operational list length shares the field layout of the
	 * admin config register, read here from the gate-state register.
	 */
	u_value = axienet_ior(lp, GATE_STATE(port));
	qbv->list_length = (u_value >> CC_ADMIN_CTRL_LIST_LENGTH_SHIFT) &
				CC_ADMIN_CTRL_LIST_LENGTH_MASK;

	u_value = axienet_ior(lp, OPER_CYCLE_TIME_DENOMINATOR(port));
	qbv->cycle_time = u_value & CYCLE_TIME_DENOMINATOR_MASK;

	u_value = axienet_ior(lp, OPER_BASE_TIME_NS(port));
	qbv->ptp_time_ns = u_value & OPER_BASE_TIME_NS_MASK;

	/* Reassemble the 48-bit seconds value from two registers. */
	qbv->ptp_time_sec = axienet_ior(lp, OPER_BASE_TIME_SEC(port));
	u_value = axienet_ior(lp, OPER_BASE_TIME_SECS(port));
	qbv->ptp_time_sec |= (u64)(u_value & BASE_TIME_SECS_MASK) << 32;

	for (i = 0; i < qbv->list_length; i++) {
		u_value = axienet_ior(lp, OPER_CTRL_LIST(port, i));
		qbv->acl_gate_state[i] = (u_value >> ACL_GATE_STATE_SHIFT) &
					ACL_GATE_STATE_MASK;

		/* With two traffic classes the ST queue is queue 1, so a
		 * hardware bitmap of 2 (queue 1 open) is presumably mapped
		 * back to the software GS_ST_OPEN value 4 — NOTE(review):
		 * confirm against the GS_* flag definitions; this is the
		 * inverse of axienet_map_gs_to_hw().
		 */
		if (lp->num_tc == 2 && qbv->acl_gate_state[i] == 2)
			qbv->acl_gate_state[i] = 4;

		u_value = axienet_ior(lp, OPER_CTRL_LIST_TIME(port, i));
		qbv->acl_gate_time[i] = u_value & CTRL_LIST_TIME_INTERVAL_MASK;
	}
	return 0;
}
178
179int axienet_get_schedule(struct net_device *ndev, void __user *useraddr)
180{
181 struct qbv_info *qbv;
182 int ret = 0;
183
184 qbv = kmalloc(sizeof(*qbv), GFP_KERNEL);
185 if (!qbv)
186 return -ENOMEM;
187
188 if (copy_from_user(qbv, useraddr, sizeof(struct qbv_info))) {
189 ret = -EFAULT;
190 goto out;
191 }
192
193 __axienet_get_schedule(ndev, qbv);
194
195 if (copy_to_user(useraddr, qbv, sizeof(struct qbv_info)))
196 ret = -EFAULT;
197out:
198 kfree(qbv);
199 return ret;
200}
201
202static irqreturn_t axienet_qbv_irq(int irq, void *_ndev)
203{
204 struct net_device *ndev = _ndev;
205 struct axienet_local *lp = netdev_priv(ndev);
206 u8 port = 0;
207
208
209 axienet_iow(lp, INT_CLEAR(port), 0);
210
211 return IRQ_HANDLED;
212}
213
214int axienet_qbv_init(struct net_device *ndev)
215{
216 struct axienet_local *lp = netdev_priv(ndev);
217 int rc;
218
219 rc = request_irq(lp->qbv_irq, axienet_qbv_irq, 0, ndev->name, ndev);
220 if (rc)
221 goto err_qbv_irq;
222
223err_qbv_irq:
224 return rc;
225}
226
227void axienet_qbv_remove(struct net_device *ndev)
228{
229 struct axienet_local *lp = netdev_priv(ndev);
230
231 free_irq(lp->qbv_irq, ndev);
232}
233