dpdk/lib/eal/arm/include/rte_cycles_64.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 * Copyright(c) 2020 Arm Limited
 */

#ifndef _RTE_CYCLES_ARM64_H_
#define _RTE_CYCLES_ARM64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_cycles.h"

/** Read generic counter frequency */
static __rte_always_inline uint64_t
__rte_arm64_cntfrq(void)
{
	uint64_t freq;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
	return freq;
}

/** Read generic counter */
static __rte_always_inline uint64_t
__rte_arm64_cntvct(void)
{
	uint64_t tsc;

	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
	return tsc;
}

/** Read generic counter after serializing the instruction stream with an ISB */
static __rte_always_inline uint64_t
__rte_arm64_cntvct_precise(void)
{
	asm volatile("isb" : : : "memory");
	return __rte_arm64_cntvct();
}

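/*
 * Informative sketch (not part of the DPDK API): the helpers above can be
 * combined to convert a measured tick delta into nanoseconds. All names
 * below are local to the example; note that the multiplication can
 * overflow uint64_t for long intervals (roughly three minutes of ticks
 * at a 100 MHz counter frequency).
 *
 *	uint64_t start, ticks, ns;
 *
 *	start = __rte_arm64_cntvct_precise();
 *	... code under measurement ...
 *	ticks = __rte_arm64_cntvct_precise() - start;
 *	ns = ticks * 1000000000 / __rte_arm64_cntfrq();
 */
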
/**
 * Read the time base register.
 *
 * @return
 *   The time base for this lcore.
 */
#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
/**
 * This call is portable to any ARMv8 architecture; however, cntvct_el0
 * typically runs at <= 100 MHz, so its resolution may be too coarse for
 * some tasks.
 */
static __rte_always_inline uint64_t
rte_rdtsc(void)
{
	return __rte_arm64_cntvct();
}
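
/*
 * Worked example of the resolution caveat above: with cntfrq_el0
 * reporting 100 MHz, one cntvct_el0 tick spans 1 s / 100000000 = 10 ns,
 * so two events closer together than 10 ns read the same counter value.
 */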
#else
/**
 * This is an alternative method to implement rte_rdtsc() with the
 * high-resolution PMU cycle counter. The cycle counter runs at the CPU
 * frequency, and this scheme uses the ARMv8 PMU subsystem to read it from
 * user space. However, user-space access to the PMU cycle counter is not
 * enabled by default in the arm64 Linux kernel.
 * It can be enabled by configuring the PMU from privileged mode (kernel
 * space), for example:
 *
 * asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)(0 << 31)));
 * asm volatile("msr pmcntenset_el0, %0" : : "r" (BIT(31)));
 * asm volatile("msr pmuserenr_el0, %0" : : "r" (BIT(0) | BIT(2)));
 * asm volatile("mrs %0, pmcr_el0" : "=r" (val));
 * val |= (BIT(0) | BIT(2));
 * isb();
 * asm volatile("msr pmcr_el0, %0" : : "r" (val));
 */
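
/*
 * A minimal, hedged sketch of driving the sequence above from an
 * out-of-tree kernel module; enable_pmu() is a hypothetical helper that
 * would contain the register writes shown in the comment. on_each_cpu()
 * is used because the PMU state is per-CPU, so every CPU that may run
 * rte_rdtsc() must be configured:
 *
 *	static void enable_pmu(void *arg)
 *	{
 *		... the msr writes shown above ...
 *	}
 *
 *	static int __init pmu_user_init(void)
 *	{
 *		on_each_cpu(enable_pmu, NULL, 1);
 *		return 0;
 *	}
 *	module_init(pmu_user_init);
 */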

/** Read PMU cycle counter */
static __rte_always_inline uint64_t
__rte_arm64_pmccntr(void)
{
	uint64_t tsc;

	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
	return tsc;
}

static __rte_always_inline uint64_t
rte_rdtsc(void)
{
	return __rte_arm64_pmccntr();
}
#endif

/**
 * Read the time base register after an ISB, so the read cannot be
 * hoisted above preceding instructions.
 */
static __rte_always_inline uint64_t
rte_rdtsc_precise(void)
{
	asm volatile("isb" : : : "memory");
	return rte_rdtsc();
}
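
/*
 * Informative usage sketch: timing a short section with serialized reads
 * and converting to nanoseconds via rte_get_tsc_hz() and NS_PER_S, both
 * provided by generic/rte_cycles.h:
 *
 *	uint64_t start, cycles;
 *
 *	start = rte_rdtsc_precise();
 *	... section under measurement ...
 *	cycles = rte_rdtsc_precise() - start;
 *	printf("%" PRIu64 " ns\n", cycles * NS_PER_S / rte_get_tsc_hz());
 */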

static __rte_always_inline uint64_t
rte_get_tsc_cycles(void)
{
	return rte_rdtsc();
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CYCLES_ARM64_H_ */