1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include "ixgbe.h"
32#include "ixgbe_mbx.h"
33
34
35
36
37
38
39
40
41
42
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46
47
48 if (size > mbx->size)
49 size = mbx->size;
50
51 if (!mbx->ops)
52 return IXGBE_ERR_MBX;
53
54 return mbx->ops->read(hw, msg, size, mbx_id);
55}
56
57
58
59
60
61
62
63
64
65
66s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
67{
68 struct ixgbe_mbx_info *mbx = &hw->mbx;
69
70 if (size > mbx->size)
71 return IXGBE_ERR_MBX;
72
73 if (!mbx->ops)
74 return IXGBE_ERR_MBX;
75
76 return mbx->ops->write(hw, msg, size, mbx_id);
77}
78
79
80
81
82
83
84
85
86s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
87{
88 struct ixgbe_mbx_info *mbx = &hw->mbx;
89
90 if (!mbx->ops)
91 return IXGBE_ERR_MBX;
92
93 return mbx->ops->check_for_msg(hw, mbx_id);
94}
95
96
97
98
99
100
101
102
103s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
104{
105 struct ixgbe_mbx_info *mbx = &hw->mbx;
106
107 if (!mbx->ops)
108 return IXGBE_ERR_MBX;
109
110 return mbx->ops->check_for_ack(hw, mbx_id);
111}
112
113
114
115
116
117
118
119
120s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
121{
122 struct ixgbe_mbx_info *mbx = &hw->mbx;
123
124 if (!mbx->ops)
125 return IXGBE_ERR_MBX;
126
127 return mbx->ops->check_for_rst(hw, mbx_id);
128}
129
130
131
132
133
134
135
136
137static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
138{
139 struct ixgbe_mbx_info *mbx = &hw->mbx;
140 int countdown = mbx->timeout;
141
142 if (!countdown || !mbx->ops)
143 return IXGBE_ERR_MBX;
144
145 while (mbx->ops->check_for_msg(hw, mbx_id)) {
146 countdown--;
147 if (!countdown)
148 return IXGBE_ERR_MBX;
149 udelay(mbx->usec_delay);
150 }
151
152 return 0;
153}
154
155
156
157
158
159
160
161
162static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
163{
164 struct ixgbe_mbx_info *mbx = &hw->mbx;
165 int countdown = mbx->timeout;
166
167 if (!countdown || !mbx->ops)
168 return IXGBE_ERR_MBX;
169
170 while (mbx->ops->check_for_ack(hw, mbx_id)) {
171 countdown--;
172 if (!countdown)
173 return IXGBE_ERR_MBX;
174 udelay(mbx->usec_delay);
175 }
176
177 return 0;
178}
179
180
181
182
183
184
185
186
187
188
189
190static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
191 u16 mbx_id)
192{
193 struct ixgbe_mbx_info *mbx = &hw->mbx;
194 s32 ret_val;
195
196 if (!mbx->ops)
197 return IXGBE_ERR_MBX;
198
199 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
200 if (ret_val)
201 return ret_val;
202
203
204 return mbx->ops->read(hw, msg, size, mbx_id);
205}
206
207
208
209
210
211
212
213
214
215
216
217static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
218 u16 mbx_id)
219{
220 struct ixgbe_mbx_info *mbx = &hw->mbx;
221 s32 ret_val;
222
223
224 if (!mbx->ops || !mbx->timeout)
225 return IXGBE_ERR_MBX;
226
227
228 ret_val = mbx->ops->write(hw, msg, size, mbx_id);
229 if (ret_val)
230 return ret_val;
231
232
233 return ixgbe_poll_for_ack(hw, mbx_id);
234}
235
236static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
237{
238 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
239
240 if (mbvficr & mask) {
241 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
242 return 0;
243 }
244
245 return IXGBE_ERR_MBX;
246}
247
248
249
250
251
252
253
254
255static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
256{
257 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
258 u32 vf_bit = vf_number % 16;
259
260 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
261 index)) {
262 hw->mbx.stats.reqs++;
263 return 0;
264 }
265
266 return IXGBE_ERR_MBX;
267}
268
269
270
271
272
273
274
275
276static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
277{
278 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
279 u32 vf_bit = vf_number % 16;
280
281 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
282 index)) {
283 hw->mbx.stats.acks++;
284 return 0;
285 }
286
287 return IXGBE_ERR_MBX;
288}
289
290
291
292
293
294
295
296
/**
 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * Reads the VF Function Level Reset indication register and, if the bit
 * for @vf_number is set, clears it and bumps the reset statistics counter.
 *
 * returns SUCCESS if the VF has reset or else ERR_MBX
 **/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	/* 32 VFs per register: offset selects the register, shift the bit */
	u32 reg_offset = (vf_number < 32) ? 0 : 1;
	u32 vf_shift = vf_number % 32;
	u32 vflre = 0;

	/* the FLR indication register differs per MAC generation;
	 * unsupported MACs leave vflre at 0 and report no reset
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		break;
	}

	if (vflre & BIT(vf_shift)) {
		/* NOTE(review): the clear is issued to VFLREC on all MAC
		 * types, including 82599 where the read used VFLRE -
		 * presumably VFLREC is the write-1-to-clear companion;
		 * confirm against the datasheet before changing.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
		hw->mbx.stats.rsts++;
		return 0;
	}

	return IXGBE_ERR_MBX;
}
325
326
327
328
329
330
331
332
333static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
334{
335 u32 p2v_mailbox;
336
337
338 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
339
340
341 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
342 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
343 return 0;
344
345 return IXGBE_ERR_MBX;
346}
347
348
349
350
351
352
353
354
355
356
357static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
358 u16 vf_number)
359{
360 s32 ret_val;
361 u16 i;
362
363
364 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
365 if (ret_val)
366 return ret_val;
367
368
369 ixgbe_check_for_msg_pf(hw, vf_number);
370 ixgbe_check_for_ack_pf(hw, vf_number);
371
372
373 for (i = 0; i < size; i++)
374 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
375
376
377 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
378
379
380 hw->mbx.stats.msgs_tx++;
381
382 return 0;
383}
384
385
386
387
388
389
390
391
392
393
394
395
396static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
397 u16 vf_number)
398{
399 s32 ret_val;
400 u16 i;
401
402
403 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
404 if (ret_val)
405 return ret_val;
406
407
408 for (i = 0; i < size; i++)
409 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
410
411
412 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
413
414
415 hw->mbx.stats.msgs_rx++;
416
417 return 0;
418}
419
420#ifdef CONFIG_PCI_IOV
421
422
423
424
425
426
427void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
428{
429 struct ixgbe_mbx_info *mbx = &hw->mbx;
430
431 if (hw->mac.type != ixgbe_mac_82599EB &&
432 hw->mac.type != ixgbe_mac_X550 &&
433 hw->mac.type != ixgbe_mac_X550EM_x &&
434 hw->mac.type != ixgbe_mac_x550em_a &&
435 hw->mac.type != ixgbe_mac_X540)
436 return;
437
438 mbx->timeout = 0;
439 mbx->usec_delay = 0;
440
441 mbx->stats.msgs_tx = 0;
442 mbx->stats.msgs_rx = 0;
443 mbx->stats.reqs = 0;
444 mbx->stats.acks = 0;
445 mbx->stats.rsts = 0;
446
447 mbx->size = IXGBE_VFMAILBOX_SIZE;
448}
449#endif
450
451const struct ixgbe_mbx_operations mbx_ops_generic = {
452 .read = ixgbe_read_mbx_pf,
453 .write = ixgbe_write_mbx_pf,
454 .read_posted = ixgbe_read_posted_mbx,
455 .write_posted = ixgbe_write_posted_mbx,
456 .check_for_msg = ixgbe_check_for_msg_pf,
457 .check_for_ack = ixgbe_check_for_ack_pf,
458 .check_for_rst = ixgbe_check_for_rst_pf,
459};
460
461