1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#include <asm/octeon/octeon.h>
33
34#include <asm/octeon/cvmx-config.h>
35#include <asm/octeon/cvmx-pko.h>
36#include <asm/octeon/cvmx-helper.h>
37
38
39
40
41
/*
 * Look up the PKO engine number for a given interface/index pair.
 *
 * Interfaces 0 and 2-4 map a contiguous range of indices starting at a
 * fixed base; interfaces 1 and 5-8 each map every index to one fixed
 * engine.  Returns -1 for an interface outside 0-8.
 */
static int __cvmx_pko_int(int interface, int index)
{
	static const int base[9] = {
		0x00, 0x04, 0x08, 0x0c, 0x10, 0x1c, 0x1d, 0x1e, 0x1f
	};

	if (interface < 0 || interface > 8)
		return -1;

	/* Interfaces 1 and 5-8 ignore the index entirely. */
	if (interface == 1 || interface >= 5)
		return base[interface];

	return base[interface] + index;
}
67
/*
 * Initialize the internal queue that feeds one CN68XX internal PKO
 * port.  Each internal port gets exactly one queue whose queue ID
 * equals the port number (1:1 mapping).
 */
static void __cvmx_pko_iport_config(int pko_port)
{
	int queue;
	const int num_queues = 1;
	const int base_queue = pko_port;	/* queue ID == port number */
	const int static_priority_end = 1;
	const int static_priority_base = 1;

	for (queue = 0; queue < num_queues; queue++) {
		union cvmx_pko_mem_iqueue_ptrs config;
		cvmx_cmd_queue_result_t cmd_res;
		uint64_t *buf_ptr;

		config.u64 = 0;
		config.s.index = queue;
		config.s.qid = base_queue + queue;
		config.s.ipid = pko_port;
		config.s.tail = (queue == (num_queues - 1));
		config.s.s_tail = (queue == static_priority_end);
		config.s.static_p = (static_priority_base >= 0);
		config.s.static_q = (queue <= static_priority_end);
		config.s.qos_mask = 0xff;	/* participate in all QOS rounds */

		/*
		 * Allocate/initialize the command queue backing this PKO
		 * queue.  A failure is reported via WARN but not fatal.
		 */
		cmd_res = cvmx_cmd_queue_initialize(
			CVMX_CMD_QUEUE_PKO(base_queue + queue),
			CVMX_PKO_MAX_QUEUE_DEPTH,
			CVMX_FPA_OUTPUT_BUFFER_POOL,
			(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
			 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));

		WARN(cmd_res,
		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
		     __func__, (int)cmd_res, pko_port, base_queue,
		     num_queues, queue);

		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
			CVMX_CMD_QUEUE_PKO(base_queue + queue));
		/* Register field takes the physical address shifted right 7. */
		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;

		/* Make the queue memory visible before telling PKO about it. */
		CVMX_SYNCWS;
		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
	}
}
110
/*
 * Configure the queue for each CN68XX internal PKO port (0-47).
 */
static void __cvmx_pko_queue_alloc_o68(void)
{
	int pko_port;

	for (pko_port = 0; pko_port < 48; pko_port++)
		__cvmx_pko_iport_config(pko_port);
}
118
/*
 * Set up the CN68XX internal-port-to-engine mapping: first park every
 * internal port on an invalid engine, then map the ports belonging to
 * active interfaces onto their real engines.
 */
static void __cvmx_pko_port_map_o68(void)
{
	int port;
	int interface, index;
	cvmx_helper_interface_mode_t mode;
	union cvmx_pko_mem_iport_ptrs config;

	/*
	 * Initialize every internal port to engine 31 so that unused
	 * ports are safely parked on an invalid engine.
	 */
	config.u64 = 0;
	config.s.eid = 31;
	for (port = 0; port < 128; port++) {
		config.s.ipid = port;
		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}

	/*
	 * Now map ports 0-47 that belong to a non-disabled interface
	 * onto the engine returned by __cvmx_pko_int().
	 */
	for (port = 0; port < 48; port++) {
		interface = cvmx_helper_get_interface_num(port);
		index = cvmx_helper_get_interface_index_num(port);
		mode = cvmx_helper_interface_get_mode(interface);
		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
			continue;

		config.s.ipid = port;
		config.s.qos_mask = 0xff;	/* all QOS rounds enabled */
		config.s.crc = 1;
		config.s.min_pkt = 1;
		config.s.intr = __cvmx_pko_int(interface, index);
		config.s.eid = config.s.intr;
		/* LOOP interfaces use the interface index as the pipe. */
		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
			index : port;
		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}
}
157
158static void __cvmx_pko_chip_init(void)
159{
160 int i;
161
162 if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
163 __cvmx_pko_port_map_o68();
164 __cvmx_pko_queue_alloc_o68();
165 return;
166 }
167
168
169
170
171 for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
172 const uint64_t priority = 8;
173
174 cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
175 &priority);
176 }
177}
178
179
180
181
182
183
184
/*
 * Call before any other calls to initialize the packet output system.
 * This does chip-global configuration and should only be done by one
 * core.
 */
void cvmx_pko_initialize_global(void)
{
	union cvmx_pko_reg_cmd_buf config;

	/*
	 * Set the size of the PKO command buffers: pool and size (in
	 * 64-bit words, minus one) come from the FPA output buffer pool
	 * configuration.
	 */
	config.u64 = 0;
	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;

	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

	/* Do the per-model queue/port setup. */
	__cvmx_pko_chip_init();

	/*
	 * If not all of the queues are needed, shrink PKO's internal
	 * queue mode: mode 2 for the smallest range, mode 1 for the
	 * middle range, default otherwise.  Thresholds differ on CN38XX.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		int num_interfaces = cvmx_helper_get_number_of_interfaces();
		int last_port =
		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
		int max_queues =
		    cvmx_pko_get_base_queue(last_port) +
		    cvmx_pko_get_num_queues(last_port);
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
			if (max_queues <= 32)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		} else {
			if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 128)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		}
	}
}
231
232
233
234
235
236void cvmx_pko_enable(void)
237{
238 union cvmx_pko_reg_flags flags;
239
240 flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
241 if (flags.s.ena_pko)
242 cvmx_dprintf
243 ("Warning: Enabling PKO when PKO already enabled.\n");
244
245 flags.s.ena_dwb = 1;
246 flags.s.ena_pko = 1;
247
248
249
250
251 flags.s.store_be = 1;
252 cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
253}
254
255
256
257
258void cvmx_pko_disable(void)
259{
260 union cvmx_pko_reg_flags pko_reg_flags;
261 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
262 pko_reg_flags.s.ena_pko = 0;
263 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
264}
265EXPORT_SYMBOL_GPL(cvmx_pko_disable);
266
267
268
269
270static void __cvmx_pko_reset(void)
271{
272 union cvmx_pko_reg_flags pko_reg_flags;
273 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
274 pko_reg_flags.s.reset = 1;
275 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
276}
277
278
279
280
/*
 * Shutdown and free resources required by packet output: disable the
 * engine, detach every queue from its port, free each queue's command
 * buffers, then soft-reset the block.
 */
void cvmx_pko_shutdown(void)
{
	union cvmx_pko_mem_queue_ptrs config;
	int queue;

	cvmx_pko_disable();

	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
		/* Park the queue on the illegal PID with no buffer. */
		config.u64 = 0;
		config.s.tail = 1;
		config.s.index = 0;
		config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
		config.s.queue = queue & 0x7f;
		config.s.qos_mask = 0;
		config.s.buf_ptr = 0;
		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_reg_queue_ptrs1 config1;
			config1.u64 = 0;
			/* Bit 7 of the queue ID lives in a separate CSR. */
			config1.s.qid7 = queue >> 7;
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		}
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
	}
	__cvmx_pko_reset();
}
307EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
/*
 * Configure a PKO output port and the queues associated with it.
 *
 * @port:       Port to configure, or CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID
 *              to park the queues on no port.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port.
 * @priority:   Array of num_queues priority levels.  Values 0-8 set the
 *              queue's QOS round mask (0 = no rounds, 8 = all rounds);
 *              CVMX_PKO_QUEUE_STATIC_PRIORITY requests static priority,
 *              in which case all static-priority queues must be
 *              contiguous and start at base_queue.
 *
 * Returns CVMX_PKO_SUCCESS, or an error code describing the invalid
 * port/queue/priority parameter.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	/* CN68XX queue setup is handled by __cvmx_pko_queue_alloc_o68(). */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return CVMX_PKO_SUCCESS;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static-priority setup: find the first and
		 * last static-priority queues and reject any gaps.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Remember the first static-priority queue seen. */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Remember the last static-priority queue. */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* All queues are static priority. */
				static_priority_end = queue;

			/*
			 * A static-priority queue appearing after the
			 * static range ended means the range is not
			 * contiguous (or does not start at the base
			 * queue).
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				     static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			     static_priority_base, static_priority_end);
#endif
	}

	/*
	 * At this point static_priority_base/end are either both -1 or
	 * a valid, contiguous, base-anchored queue range.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		/* High queue/index bits live in a separate CSR. */
		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		/* Pass-1 silicon lacks the static-priority fields. */
		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}

		/*
		 * Convert the 0-8 priority into a QOS-round enable bit
		 * mask, spreading the enabled rounds out evenly.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Static priority is invalid on pass-1 parts. */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
			fallthrough;
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr =
			    (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		/* Make the queue memory visible before telling PKO. */
		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
535
536#ifdef PKO_DEBUG
537
538
539
540void cvmx_pko_show_queue_map()
541{
542 int core, port;
543 int pko_output_ports = 36;
544
545 cvmx_dprintf("port");
546 for (port = 0; port < pko_output_ports; port++)
547 cvmx_dprintf("%3d ", port);
548 cvmx_dprintf("\n");
549
550 for (core = 0; core < CVMX_MAX_CORES; core++) {
551 cvmx_dprintf("\n%2d: ", core);
552 for (port = 0; port < pko_output_ports; port++) {
553 cvmx_dprintf("%3d ",
554 cvmx_pko_get_base_queue_per_core(port,
555 core));
556 }
557 }
558 cvmx_dprintf("\n");
559}
560#endif
561
562
563
564
565
566
567
568
569
570
571
572
573int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
574{
575 union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
576 union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
577
578 pko_mem_port_rate0.u64 = 0;
579 pko_mem_port_rate0.s.pid = port;
580 pko_mem_port_rate0.s.rate_pkt =
581 cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
582
583 pko_mem_port_rate0.s.rate_word = 0;
584
585 pko_mem_port_rate1.u64 = 0;
586 pko_mem_port_rate1.s.pid = port;
587 pko_mem_port_rate1.s.rate_lim =
588 ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
589
590 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
591 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
592 return 0;
593}
594
595
596
597
598
599
600
601
602
603
604
605
606int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
607{
608 union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
609 union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
610 uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
611 uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
612
613 pko_mem_port_rate0.u64 = 0;
614 pko_mem_port_rate0.s.pid = port;
615
616
617
618
619
620
621 pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
622
623 pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
624
625 pko_mem_port_rate1.u64 = 0;
626 pko_mem_port_rate1.s.pid = port;
627 pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
628
629 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
630 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
631 return 0;
632}
633