linux/arch/csky/include/asm/asid.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info
{
        atomic64_t      generation;
        unsigned long   *map;
        atomic64_t __percpu     *active;
        u64 __percpu            *reserved;
        u32                     bits;
        /* Lock protecting the structure */
        raw_spinlock_t          lock;
        /* Which CPUs require a context flush on the next call */
        cpumask_t               flush_pending;
        /* Number of ASIDs allocated per context (as a shift value) */
        unsigned int            ctxt_shift;
        /* Callback to locally flush the context. */
        void                    (*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)                 (1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)            (NUM_ASIDS(info) >> (info)->ctxt_shift)
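/*
 * For example (hypothetical values, not defined by this header): with
 * bits = 12 and one ASID per context (ctxt_shift = 0),
 *
 *        NUM_ASIDS(info)      == 1UL << 12 == 4096
 *        NUM_CTXT_ASIDS(info) == 4096 >> 0 == 4096
 *
 * Allocating two ASIDs per context (ctxt_shift = 1) halves the number of
 * contexts the same ASID space can cover: NUM_CTXT_ASIDS(info) == 2048.
 */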

#define active_asid(info, cpu)  *per_cpu_ptr((info)->active, cpu)

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm);

/*
 * Check that the ASID is still valid for the context. If not, generate
 * a new one.
 *
 * @info: Pointer to the ASID allocator
 * @pasid: Pointer to the current ASID batch
 * @cpu: Current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct of the context
 */
static inline void asid_check_context(struct asid_info *info,
                                      atomic64_t *pasid, unsigned int cpu,
                                      struct mm_struct *mm)
{
        u64 asid, old_active_asid;

        asid = atomic64_read(pasid);

        /*
         * The memory ordering here is subtle.
         * If our active_asid is non-zero and the ASID matches the current
         * generation, then we update the active_asid entry with a relaxed
         * cmpxchg. Racing with a concurrent rollover means that either:
         *
         * - We get a zero back from the cmpxchg and end up waiting on the
         *   lock. Taking the lock synchronises with the rollover and so
         *   we are forced to see the updated generation.
         *
         * - We get a valid ASID back from the cmpxchg, which means the
         *   relaxed xchg in flush_context will treat us as reserved
         *   because atomic RmWs are totally ordered for a given location.
         */
        old_active_asid = atomic64_read(&active_asid(info, cpu));
        if (old_active_asid &&
            !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
            atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
                                     old_active_asid, asid))
                return;

        asid_new_context(info, pasid, cpu, mm);
}
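
/*
 * Usage sketch (illustrative, not part of this header): an architecture
 * would typically wrap asid_check_context() in its context-switch path,
 * holding the CPU via get_cpu() as required by the comment above. The
 * names check_and_switch_context(), asid_info and mm->context.asid are
 * assumptions for the example.
 *
 *        static void check_and_switch_context(struct mm_struct *mm)
 *        {
 *                unsigned int cpu = get_cpu();
 *
 *                asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
 *                put_cpu();
 *        }
 */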

int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void));
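
/*
 * Initialisation sketch (illustrative, not part of this header): a port
 * with 12-bit hardware ASIDs and one ASID per context could set up the
 * allocator as below. asid_info, asid_flush_cpu_ctxt() and the use of
 * local_flush_tlb_all() as the flush callback are assumptions for the
 * example; asid_allocator_init() returns nonzero on failure.
 *
 *        static struct asid_info asid_info;
 *
 *        static void asid_flush_cpu_ctxt(void)
 *        {
 *                local_flush_tlb_all();
 *        }
 *
 *        static int __init asids_init(void)
 *        {
 *                return asid_allocator_init(&asid_info, 12, 1,
 *                                           asid_flush_cpu_ctxt);
 *        }
 */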

#endif