linux/drivers/gpu/drm/i915/i915_gem_clflush.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"

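/*
 * dma_fence_init() requires a spinlock to serialise each fence's signalling
 * state and callback list; all clflush fences share this single global lock.
 */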
static DEFINE_SPINLOCK(clflush_lock);

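/*
 * Bookkeeping for one asynchronous cacheline flush: the dma_fence is
 * published to the object's reservation so others can wait for the flush,
 * the i915_sw_fence waits for prior work on the object to complete, and
 * the work_struct performs the actual clflush from process context.
 */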
struct clflush {
        struct dma_fence dma; /* Must be first for dma_fence_free() */
        struct i915_sw_fence wait;
        struct work_struct work;
        struct drm_i915_gem_object *obj;
};

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
        return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
        return "clflush";
}

static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
        return true;
}

static void i915_clflush_release(struct dma_fence *fence)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

        i915_sw_fence_fini(&clflush->wait);

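        /*
         * dma_fence_free() frees the whole allocation starting from the
         * embedded fence pointer, so 'dma' must remain the first member
         * of struct clflush.
         */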
        BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
        dma_fence_free(&clflush->dma);
}

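/*
 * Minimal dma_fence_ops: the fence is signalled directly from the worker,
 * so enable_signaling() has nothing to arm and the default wait suffices.
 */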
static const struct dma_fence_ops i915_clflush_ops = {
        .get_driver_name = i915_clflush_get_driver_name,
        .get_timeline_name = i915_clflush_get_timeline_name,
        .enable_signaling = i915_clflush_enable_signaling,
        .wait = dma_fence_default_wait,
        .release = i915_clflush_release,
};

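/*
 * Flush every cacheline backing the object, then notify frontbuffer
 * tracking of the CPU write (ORIGIN_CPU) so any scanout is refreshed.
 */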
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
        intel_fb_obj_flush(obj, ORIGIN_CPU);
}

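/*
 * Worker for the asynchronous path: runs once all fences we had to wait
 * upon have signalled. It pins the backing pages, flushes them, then
 * signals the clflush fence and drops the references taken at creation.
 */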
static void i915_clflush_work(struct work_struct *work)
{
        struct clflush *clflush = container_of(work, typeof(*clflush), work);
        struct drm_i915_gem_object *obj = clflush->obj;

        if (i915_gem_object_pin_pages(obj)) {
                DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
                goto out;
        }

        __i915_do_clflush(obj);

        i915_gem_object_unpin_pages(obj);

out:
        i915_gem_object_put(obj);

        dma_fence_signal(&clflush->dma);
        dma_fence_put(&clflush->dma);
}

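/*
 * i915_sw_fence callback: FENCE_COMPLETE fires once every fence we
 * awaited has signalled, at which point the flush work is scheduled;
 * FENCE_FREE drops the reference the sw_fence held on the dma_fence.
 */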
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
                    enum i915_sw_fence_notify state)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

        switch (state) {
        case FENCE_COMPLETE:
                schedule_work(&clflush->work);
                break;

        case FENCE_FREE:
                dma_fence_put(&clflush->dma);
                break;
        }

        return NOTIFY_DONE;
}

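/**
 * i915_gem_clflush_object - make the object's backing store coherent with
 * the CPU caches
 * @obj: the object to flush
 * @flags: I915_CLFLUSH_SYNC to force the flush to happen inline,
 *         I915_CLFLUSH_FORCE to flush even cache-coherent objects
 *
 * Returns true if a flush was performed or queued, false if the object
 * was already coherent and no flush was required.
 */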
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and to need no clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /*
         * If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) &&
            obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
                return false;

        trace_i915_gem_object_clflush(obj);

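        /*
         * Unless the caller demands a synchronous flush, try to queue the
         * clflush asynchronously behind whatever already operates on the
         * object; if the allocation fails, fall back to flushing inline.
         */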
        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (clflush) {
                GEM_BUG_ON(!obj->cache_dirty);

                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
                               to_i915(obj->base.dev)->mm.unordered_timeline,
                               0);
                i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

                clflush->obj = i915_gem_object_get(obj);
                INIT_WORK(&clflush->work, i915_clflush_work);

                dma_fence_get(&clflush->dma);

                i915_sw_fence_await_reservation(&clflush->wait,
                                                obj->resv, NULL,
                                                true, I915_FENCE_TIMEOUT,
                                                I915_FENCE_GFP);

                reservation_object_lock(obj->resv, NULL);
                reservation_object_add_excl_fence(obj->resv, &clflush->dma);
                reservation_object_unlock(obj->resv);

                i915_sw_fence_commit(&clflush->wait);
        } else if (obj->mm.pages) {
                __i915_do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }

        obj->cache_dirty = false;
        return true;
}
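
/*
 * Typical usage (a sketch, not a verbatim caller): a domain-management
 * path flushes stale cachelines before the GPU samples the pages, e.g.
 *
 *        if (obj->cache_dirty)
 *                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *
 * Passing 0 for flags instead allows the flush to be queued behind any
 * outstanding work tracked in obj->resv.
 */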