// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

/* RVU LMTST */
#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16

/* Read or write one 64-bit word of the LMTST map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq((*val), (lmt_map_base + index));
		/* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
		 * changes effective. Writing 1 triggers the flush and the
		 * read back acts as a barrier, setting up a data dependency.
		 * Writing 0 after the write of 1 completes the flush.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

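/* Layout note (inferred from the constants used here): each 16-byte map
 * table entry holds two 64-bit words. Word0 (offset 0) carries the LMTLINE
 * base address, word1 (offset LMT_MAP_TBL_W1_OFF) carries the
 * APR_LMT_MAP_ENTRY_S control bits. Entries are indexed per PF/VF as
 * (PF * total_vfs + func) * LMT_MAPTBL_ENTRY_SIZE.
 */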
#define LMT_MAP_TBL_W1_OFF	8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}

static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

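	/* Post the IOVA, then a translation transaction request naming the
	 * requester: function in bits [7:0], PF in bits [12:8]. Bits 13, 14
	 * and 63 are control/valid flags whose exact semantics follow the
	 * CN10K register specification and are treated as opaque here.
	 */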
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		return err;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		return -EIO;
	}
	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
	 * PA[11:0] = IOVA[11:0]
	 */
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);

	return 0;
}
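
/* Write a new LMTLINE base address into word0 of pcifunc's map table
 * entry, stashing the original value so it can be restored on FLR.
 */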
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err = 0;
	u64 val;

	/* Read the current lmt addr of pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the secondary's lmt base address, as this needs to be
	 * reverted in FLR. Also make sure this default value doesn't get
	 * overwritten on multiple calls to this mailbox.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with the new addr */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err) {
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}
	return 0;
}
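
/* Mailbox handler for LMTST table setup. A requesting PF/VF can map its
 * own memory as the LMTLINE region, share the LMTLINE of a base/primary
 * pcifunc, and/or update word1 control bits of its APR_LMT_MAP_ENTRY_S
 * (scheduled LMTST, early completion, line prefetch).
 */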
int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	u32 pri_tbl_idx, tbl_idx;
	u64 lmt_addr;
	int err = 0;
	u64 val;

	/* Check if this PF_FUNC wants to use its own local memory as the
	 * LMTLINE region; if so, convert that IOVA to a physical address
	 * and populate the LMT table with that address.
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the lmt addr for this PFFUNC in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the lmtst map table in shared-region mode, i.e. make
	 * multiple PF_FUNCs share one LMTLINE region: the primary/base
	 * pcifunc (passed in the request) is the one whose lmt base address
	 * is shared with the secondary pcifunc (the one calling this
	 * mailbox).
	 */
	if (req->base_pcifunc) {
		/* Calculate the LMT table index corresponding to the
		 * primary pcifunc.
		 */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base lmt addr of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			goto error;
		}

		/* Update the secondary's base lmt addr with the primary's
		 * base lmt addr.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	/* This mailbox can also be used to update word1 of
	 * APR_LMT_MAP_ENTRY_S, e.g. to enable scheduled LMTST, disable
	 * LMTLINE prefetch or disable early completion for ordered LMTST.
	 */
	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}

		/* Store the default value of lmt map table entry word1, as
		 * this needs to be reverted in FLR. Also make sure this
		 * default value doesn't get overwritten on multiple calls
		 * to this mailbox.
		 */
		if (!pfvf->lmt_map_ent_w1)
			pfvf->lmt_map_ent_w1 = val;

		/* Disable early completion for ordered LMTSTs */
		if (req->dis_sched_early_comp)
			val |= (req->dis_sched_early_comp <<
				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
		/* Enable scheduled LMTST */
		if (req->sch_ena)
			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
				req->ssow_pf_func;
		/* Disable LMTLINE prefetch before receiving store data */
		if (req->dis_line_pref)
			val |= (req->dis_line_pref <<
				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_WRITE);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}
	}

error:
	return err;
}

/* Reset the lmtst map table to its original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
		/* This corresponds to the lmt map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
		/* Revert to the original lmt base addr for this
		 * pcifunc.
		 */
		if (pfvf->lmt_base_addr) {
			err = lmtst_map_table_ops(rvu, tbl_idx,
						  &pfvf->lmt_base_addr,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx, err);
			pfvf->lmt_base_addr = 0;
		}
		/* Revert to the original word1 value of the lmtst map
		 * table entry which underwent changes.
		 */
		if (pfvf->lmt_map_ent_w1) {
			err = lmtst_map_table_ops(rvu,
						  tbl_idx + LMT_MAP_TBL_W1_OFF,
						  &pfvf->lmt_map_ent_w1,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			pfvf->lmt_map_ent_w1 = 0;
		}
	}
}

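/* Read NIX link counts from NIX_AF_CONST/NIX_AF_CONST1 and pick channel
 * number bases for the LBK, SDP, CGX and CPT links. On silicon with
 * programmable channels (NIX_AF_CONST bit 60) the bases are packed
 * contiguously, each aligned to that block's channel count.
 */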
int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present then configure them so that
	 * all channel numbers are contiguous, leaving no holes. This way
	 * the new CPT channels can be accommodated. Channel numbers are
	 * assigned in the order LBK, SDP, CGX, CPT; the base channel number
	 * of a block must also be a multiple of that block's channel count.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up the base channel to a multiple of the channel count */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);

	/* Out of 4096 channels start CPT from 2048 (0x800) so that the MSB
	 * of CPT channel numbers is always set.
	 */
	if (cpt_chan_base <= 0x800) {
		hw->cpt_chan_base = 0x800;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)	(0x0 + (a))

static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* Loopback (LBK) blocks connect NIX blocks to each other. For each
	 * LBK device on the bus, read its CONST register to find the source
	 * and destination NIX blocks and the channel count, then program
	 * the X2P and P2X link configs. When source and destination differ,
	 * X2P is programmed with the source NIX id and P2X with the
	 * destination NIX id, so channel numbers match across the pair.
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) {
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}
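
/* Program each NIX link's channel base and range (log2 of the channel
 * count) from the per-block constants. Links are numbered with CGX links
 * first, then LBK, SDP and CPT, matching the loop order below.
 */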
static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register for the number of
	 * channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}