linux/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

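/*
 * A cache flush wrapped in a dma_fence, so that the clflush can be
 * ordered against other work on the object via its reservation object.
 */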
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

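/*
 * Flush every CPU cacheline backing the object's pages, then notify
 * frontbuffer tracking that the CPU writes have landed.
 */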
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

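/*
 * Fence-work callback, run from a worker once the fences we awaited
 * have signaled. The pages are pinned for the duration of the flush so
 * they cannot be released beneath us; any error is propagated through
 * the fence.
 */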
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = clflush->obj;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

	return 0;
}

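/* Drop the object reference taken in clflush_work_create(). */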
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_put(clflush->obj);
}

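/* Glue the flush work and the final object put into the fence machinery. */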
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

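/*
 * Build the asynchronous flush work, holding a reference on the object
 * until release. Returns NULL on allocation failure, in which case the
 * caller falls back to flushing synchronously.
 */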
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

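/*
 * Flush dirty CPU cachelines for the object's backing pages. Unless
 * I915_CLFLUSH_SYNC is given, the flush is queued as fence work behind
 * the fences already tracked on the object; I915_CLFLUSH_FORCE flushes
 * even objects that are coherent for reads. Returns false if the
 * object was already coherent and the flush was skipped.
 */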
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
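	/*
	 * Queue the flush behind every fence already tracked on the
	 * object, then publish our fence as the new exclusive fence so
	 * that subsequent work waits for the flush to complete. If the
	 * worker could not be allocated, fall back to flushing inline.
	 */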
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(to_i915(obj->base.dev)),
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}