linux/include/linux/hmm.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in the vma, except that it is per device driver rather than per
 * architecture.
 */
enum hmm_pfn_flag_e {
        HMM_PFN_VALID = 0,
        HMM_PFN_WRITE,
        HMM_PFN_FLAG_MAX
};

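/*
 * For example, a hypothetical driver whose device PTE keeps its valid bit in
 * bit 3 and its write bit in bit 4 could describe that mapping like so (a
 * sketch; the bit positions and the array name are illustrative, not taken
 * from any real driver):
 *
 *      static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID] = 1ULL << 3,
 *              [HMM_PFN_WRITE] = 1ULL << 4,
 *      };
 */
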
/*
 * hmm_pfn_value_e - HMM pfn special values
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 *      be mirrored by a device, because the entry will never have HMM_PFN_VALID
 *      set and the pfn value is undefined.
 *
 * The driver provides values for the none, error, and special entries. The
 * driver can alias (i.e., use the same value for) error and special, but
 * it must not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_range_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special
 */
enum hmm_pfn_value_e {
        HMM_PFN_ERROR,
        HMM_PFN_NONE,
        HMM_PFN_SPECIAL,
        HMM_PFN_VALUE_MAX
};

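/*
 * Continuing the hypothetical driver above, the special values could be
 * encoded in otherwise-unused low bits (a sketch; the values are illustrative
 * and must not collide with the driver's flag bits):
 *
 *      static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_ERROR]   = 1ULL << 1,
 *              [HMM_PFN_NONE]    = 0,
 *              [HMM_PFN_SPECIAL] = 1ULL << 2,
 *      };
 *
 * Error and special could legally share a value here; none must stay
 * distinct from both.
 */
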
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for the special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matters
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
        struct mmu_interval_notifier *notifier;
        unsigned long           notifier_seq;
        unsigned long           start;
        unsigned long           end;
        uint64_t                *pfns;
        const uint64_t          *flags;
        const uint64_t          *values;
        uint64_t                default_flags;
        uint64_t                pfn_flags_mask;
        uint8_t                 pfn_shift;
        void                    *dev_private_owner;
};

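/*
 * A minimal sketch of filling in a range before faulting it, assuming the
 * example_flags/example_values arrays above and a hypothetical interval
 * notifier, pfns buffer, and owner cookie:
 *
 *      struct hmm_range range = {
 *              .notifier          = &interval_sub,
 *              .start             = addr,
 *              .end               = addr + (npages << PAGE_SHIFT),
 *              .pfns              = pfns,
 *              .flags             = example_flags,
 *              .values            = example_values,
 *              .default_flags     = example_flags[HMM_PFN_VALID],
 *              .pfn_flags_mask    = 0,
 *              .pfn_shift         = PAGE_SHIFT,
 *              .dev_private_owner = drvdata,
 *      };
 *
 * Here default_flags requests at least read permission for every page in the
 * range, and pfn_shift leaves the low bits of each entry free for the flag
 * and special-value bits chosen above.
 */
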
/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Return: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
                                                    uint64_t entry)
{
        if (entry == range->values[HMM_PFN_NONE])
                return NULL;
        if (entry == range->values[HMM_PFN_ERROR])
                return NULL;
        if (entry == range->values[HMM_PFN_SPECIAL])
                return NULL;
        if (!(entry & range->flags[HMM_PFN_VALID]))
                return NULL;
        return pfn_to_page(entry >> range->pfn_shift);
}

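/*
 * A sketch of consuming the snapshot after a successful hmm_range_fault()
 * call (range and npages as in the hypothetical setup above):
 *
 *      unsigned long i;
 *
 *      for (i = 0; i < npages; i++) {
 *              struct page *page;
 *
 *              page = hmm_device_entry_to_page(&range, range.pfns[i]);
 *              if (!page)
 *                      continue;
 *              ... program a device page table entry for page, granting
 *                  write only if range.pfns[i] has the HMM_PFN_WRITE flag ...
 *      }
 */
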
/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
long hmm_range_fault(struct hmm_range *range);

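/*
 * The pattern documented in Documentation/vm/hmm.rst is a begin/fault/retry
 * loop against the mmu interval notifier. A sketch (the driver page table
 * lock is hypothetical, and the mmap_sem calls may differ by kernel version):
 *
 *      long ret;
 *
 * again:
 *      range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *      down_read(&mm->mmap_sem);
 *      ret = hmm_range_fault(&range);
 *      up_read(&mm->mmap_sem);
 *      if (ret < 0) {
 *              if (ret == -EBUSY)
 *                      goto again;
 *              return ret;
 *      }
 *      take_driver_page_table_lock();
 *      if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
 *              release_driver_page_table_lock();
 *              goto again;
 *      }
 *      ... use range.pfns to update the device page table ...
 *      release_driver_page_table_lock();
 */
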
/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e., 1s, is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* LINUX_HMM_H */