linux/include/xen/xen-ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INCLUDE_XEN_OPS_H
#define INCLUDE_XEN_OPS_H

#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
static inline uint32_t xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}

#define XEN_VCPU_ID_INVALID U32_MAX
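
/*
 * Example (illustrative sketch, not part of the original header):
 * xen_vcpu_nr() translates a Linux CPU number into the Xen vCPU id used in
 * per-vCPU hypercalls, and XEN_VCPU_ID_INVALID marks CPUs that have no Xen
 * vCPU behind them.  A caller might check for the invalid id before issuing
 * a hypercall, roughly:
 *
 *	uint32_t vcpu = xen_vcpu_nr(cpu);
 *
 *	if (vcpu == XEN_VCPU_ID_INVALID)
 *		return -ENOENT;
 *	rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, vcpu, NULL);
 */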

void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);

void xen_reboot(int reason);

void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);

bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);

int xen_setup_shutdown_event(void);

extern unsigned long *xen_contiguous_bitmap;

#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				unsigned int address_bits,
				dma_addr_t *dma_handle);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
					       unsigned int order,
					       unsigned int address_bits,
					       dma_addr_t *dma_handle)
{
	return 0;
}

static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
						 unsigned int order) { }
#endif
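
/*
 * Example (illustrative sketch, not part of the original header): a PV or
 * Arm guest that needs a machine-contiguous, address-limited buffer can
 * exchange the frames backing an already-allocated region and restore them
 * on release.  "buf" and "order" below stand for the caller's buffer and
 * its allocation order; they are not defined by this header, and 32 is just
 * an example address width.
 *
 *	dma_addr_t dma_handle;
 *	int rc;
 *
 *	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
 *					  32, &dma_handle);
 *	if (rc)
 *		return rc;
 *	... use the buffer for DMA via dma_handle ...
 *	xen_destroy_contiguous_region(virt_to_phys(buf), order);
 */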

#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
		  unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
				xen_pfn_t *pfn, int nr, int *err_ptr,
				pgprot_t prot, unsigned int domid,
				bool no_translate, struct page **pages)
{
	BUG();
	return 0;
}
#endif

struct vm_area_struct;

#ifdef CONFIG_XEN_AUTO_XLATE
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned int domid,
			      struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
#else
/*
 * These two functions are called from arch/x86/xen/mmu.c and so stubs
 * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
 */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
#endif

int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
			unsigned long len);

/*
 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
 * @vma:     VMA to map the pages into
 * @addr:    Address at which to map the pages
 * @gfn:     Array of GFNs to map
 * @nr:      Number of entries in the GFN array
 * @err_ptr: Returns per-GFN error status.
 * @prot:    page protection mask
 * @domid:   Domain owning the pages
 * @pages:   Array of pages if this domain has an auto-translated physmap
 *
 * @gfn and @err_ptr may point to the same buffer; the GFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t *gfn, int nr,
					     int *err_ptr, pgprot_t prot,
					     unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/* We BUG_ON() here because passing a NULL err_ptr is a programmer
	 * error, and the eventual consequence, wrong memory being mapped in,
	 * is very hard to trace back to its actual cause later.
	 */
	BUG_ON(err_ptr == NULL);
	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			     false, pages);
}
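
/*
 * Example (illustrative sketch, not part of the original header): a
 * privileged backend mapping @nr foreign frames from domain @domid into a
 * userspace VMA would typically call this from its mmap path and then walk
 * the per-frame error array.  "gfns", "errs" and "pages" below are the
 * caller's own buffers, not something defined here.
 *
 *	int i, mapped;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
 *					    errs, vma->vm_page_prot, domid,
 *					    pages);
 *	if (mapped < 0)
 *		return mapped;
 *	for (i = 0; i < nr; i++)
 *		if (errs[i])
 *			pr_warn("gfn %d failed: %d\n", i, errs[i]);
 */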

/*
 * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
 * @vma:     VMA to map the pages into
 * @addr:    Address at which to map the pages
 * @mfn:     Array of MFNs to map
 * @nr:      Number of entries in the MFN array
 * @err_ptr: Returns per-MFN error status.
 * @prot:    page protection mask
 * @domid:   Domain owning the pages
 * @pages:   Array of pages if this domain has an auto-translated physmap
 *
 * @mfn and @err_ptr may point to the same buffer; the MFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t *mfn,
					     int nr, int *err_ptr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			     true, pages);
}
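
/*
 * Example (illustrative sketch, not part of the original header): the MFN
 * variant is only meaningful for callers that deal in machine frames
 * directly; an auto-translated guest gets -EOPNOTSUPP.  Usage mirrors
 * xen_remap_domain_gfn_array(), with "mfns" and "errs" again being the
 * caller's buffers:
 *
 *	ret = xen_remap_domain_mfn_array(vma, vma->vm_start, mfns, nr,
 *					 errs, vma->vm_page_prot, domid,
 *					 pages);
 */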

/* xen_remap_domain_gfn_range() - map a range of foreign frames
 * @vma:     VMA to map the pages into
 * @addr:    Address at which to map the pages
 * @gfn:     First GFN to map.
 * @nr:      Number of frames to map
 * @prot:    page protection mask
 * @domid:   Domain owning the pages
 * @pages:   Array of pages if this domain has an auto-translated physmap
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t gfn, int nr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
			     pages);
}
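
/*
 * Example (illustrative sketch, not part of the original header): mapping a
 * contiguous range of @nr frames starting at @gfn needs no per-frame error
 * array; a negative return covers the whole range:
 *
 *	ret = xen_remap_domain_gfn_range(vma, vma->vm_start, gfn, nr,
 *					 vma->vm_page_prot, domid, pages);
 *	if (ret < 0)
 *		return ret;
 */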

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages);

int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);

bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);

efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
efi_status_t xen_efi_set_time(efi_time_t *tm);
efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
				     efi_time_t *tm);
efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
				  u32 *attr, unsigned long *data_size,
				  void *data);
efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
				       efi_char16_t *name, efi_guid_t *vendor);
efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
				  u32 attr, unsigned long data_size,
				  void *data);
efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
					 u64 *remaining_space,
					 u64 *max_variable_size);
efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
				    unsigned long count, unsigned long sg_list);
efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
					unsigned long count, u64 *max_size,
					int *reset_type);
void xen_efi_reset_system(int reset_type, efi_status_t status,
			  unsigned long data_size, efi_char16_t *data);

#ifdef CONFIG_PREEMPT

static inline void xen_preemptible_hcall_begin(void)
{
}

static inline void xen_preemptible_hcall_end(void)
{
}

#else

DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

#endif /* CONFIG_PREEMPT */
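
/*
 * Example (illustrative sketch, not part of the original header): the
 * begin/end pair brackets a long-running hypercall issued from non-atomic
 * context, so that even a non-preemptible kernel can reschedule at the
 * hypercall's preemption points.  A caller would do, roughly:
 *
 *	xen_preemptible_hcall_begin();
 *	ret = HYPERVISOR_dm_op(domid, nr_bufs, bufs);
 *	xen_preemptible_hcall_end();
 */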

#endif /* INCLUDE_XEN_OPS_H */