linux/drivers/gpu/drm/i915/gvt/mpt.h
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Dexuan Cui
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_

#include "gvt.h"

/**
 * DOC: Hypervisor Service APIs for GVT-g Core Logic
 *
 * This is the glue layer between the hypervisor-specific MPT modules and the
 * GVT-g core logic. Each kind of hypervisor MPT module provides a collection
 * of function callbacks and is attached to the GVT host when the driver is
 * loaded. The GVT-g core logic calls these APIs to request services from the
 * hypervisor.
 */

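/*
 * Illustrative sketch (not part of the upstream file): roughly how a
 * hypervisor MPT module could hand its callback table to the GVT host via
 * intel_gvt_register_hypervisor(), declared at the bottom of this header.
 * The sample_* names are made up for illustration, and only a few of the
 * callbacks used by the wrappers below are shown; a real module such as
 * kvmgt fills in the full struct intel_gvt_mpt.
 *
 *	static const struct intel_gvt_mpt sample_mpt_ops = {
 *		.host_init	= sample_host_init,
 *		.host_exit	= sample_host_exit,
 *		.inject_msi	= sample_inject_msi,
 *		.read_gpa	= sample_read_gpa,
 *		.write_gpa	= sample_write_gpa,
 *		.gfn_to_mfn	= sample_gfn_to_mfn,
 *	};
 *
 *	static int __init sample_mpt_init(void)
 *	{
 *		// Attach the callback table; GVT-g core then reaches the
 *		// hypervisor through the intel_gvt_hypervisor_*() wrappers.
 *		return intel_gvt_register_hypervisor(&sample_mpt_ops);
 *	}
 *
 *	static void __exit sample_mpt_exit(void)
 *	{
 *		intel_gvt_unregister_hypervisor();
 *	}
 */
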
/**
 * intel_gvt_hypervisor_host_init - init GVT-g host side
 * @dev: the host device that GVT-g runs on
 * @gvt: opaque pointer to the GVT-g instance
 * @ops: GVT-g callback table exposed to the MPT module
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
						 void *gvt, const void *ops)
{
	if (!intel_gvt_host.mpt->host_init)
		return -ENODEV;

	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}

/**
 * intel_gvt_hypervisor_host_exit - exit GVT-g host side
 * @dev: the host device that GVT-g runs on
 * @gvt: opaque pointer to the GVT-g instance
 */
static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->host_exit)
		return;

	intel_gvt_host.mpt->host_exit(dev, gvt);
}

/**
 * intel_gvt_hypervisor_attach_vgpu - initialize vGPU state inside the hypervisor
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->attach_vgpu)
		return 0;

	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}

/**
 * intel_gvt_hypervisor_detach_vgpu - release vGPU state inside the hypervisor
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->detach_vgpu)
		return;

	intel_gvt_host.mpt->detach_vgpu(vgpu);
}

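/*
 * Offsets of the Message Control, Message Address and Message Data fields
 * relative to the start of the standard PCI MSI capability, assuming the
 * basic 32-bit, single-message layout (Message Data right after the 32-bit
 * Message Address); inject_msi() below warns and bails out if the control
 * word advertises anything else.
 */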
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1

/**
 * intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into vGPU
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
{
	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
	u16 control, data;
	u32 addr;

	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));

	/* Do not generate an MSI if MSIEN is disabled */
	if (!(control & MSI_CAP_EN))
		return 0;

	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
		return -EINVAL;

	trace_inject_msi(vgpu->id, addr, data);

	return intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
}

/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
	return intel_gvt_host.mpt->from_virt_to_mfn(p);
}

/**
 * intel_gvt_hypervisor_enable_page_track - track a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_enable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_disable_page_track - untrack a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_disable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
 * @vgpu: a vGPU
 * @gfn: guest pfn
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_dma_map_guest_page - set up a DMA mapping for a guest page
 * @vgpu: a vGPU
 * @gfn: guest pfn
 * @size: page size
 * @dma_addr: where the allocated DMA address is returned
 *
 * Returns:
 * 0 on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_dma_map_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
		dma_addr_t *dma_addr)
{
	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
						      dma_addr);
}

/**
 * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
 * @vgpu: a vGPU
 * @dma_addr: the mapped dma addr
 */
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}

/**
 * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
 * @vgpu: a vGPU
 * @dma_addr: guest dma addr
 *
 * Returns:
 * 0 on success, negative error code if failed.
 */
static inline int
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
					dma_addr_t dma_addr)
{
	return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
}

/**
 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU
 * @gfn: guest PFN
 * @mfn: host PFN
 * @nr: number of PFNs
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long mfn, unsigned int nr,
		bool map)
{
	/* an MPT implementation could have MMIO mapped elsewhere */
	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
		return 0;

	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
						  map);
}

/**
 * intel_gvt_hypervisor_set_trap_area - trap a guest PA region
 * @vgpu: a vGPU
 * @start: the beginning of the guest physical address region
 * @end: the end of the guest physical address region
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_trap_area(
		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
	/* an MPT implementation could have MMIO trapped elsewhere */
	if (!intel_gvt_host.mpt->set_trap_area)
		return 0;

	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}

/**
 * intel_gvt_hypervisor_set_opregion - set opregion for guest
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->set_opregion)
		return 0;

	return intel_gvt_host.mpt->set_opregion(vgpu);
}

/**
 * intel_gvt_hypervisor_set_edid - set EDID region for guest
 * @vgpu: a vGPU
 * @port_num: display port number
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
						int port_num)
{
	if (!intel_gvt_host.mpt->set_edid)
		return 0;

	return intel_gvt_host.mpt->set_edid(vgpu, port_num);
}

/**
 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->get_vfio_device)
		return 0;

	return intel_gvt_host.mpt->get_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->put_vfio_device)
		return;

	intel_gvt_host.mpt->put_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is valid for the guest
 * @vgpu: a vGPU
 * @gfn: guest PFN
 *
 * Returns:
 * True if the gfn is valid, false otherwise.
 */
static inline bool intel_gvt_hypervisor_is_valid_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	if (!intel_gvt_host.mpt->is_valid_gfn)
		return true;

	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}

int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);

#endif /* _GVT_MPT_H_ */