linux/drivers/gpu/drm/drm_cache.c
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <drm/drmP.h>

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;
        const int size = boot_cpu_data.x86_clflush_size;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page);
        for (i = 0; i < PAGE_SIZE; i += size)
                clflushopt(page_virtual + i);
        kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
                                    unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; i++)
                drm_clflush_page(*pages++);
        mb();
}
#endif

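/**
 * drm_clflush_pages - Flush CPU data cache lines for an array of pages.
 * @pages: Array of pages to flush.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array, so that a device which does not snoop the CPU
 * caches observes the most recent CPU writes. On x86 this flushes by
 * cache line when X86_FEATURE_CLFLUSH is present and otherwise falls
 * back to wbinvd on all CPUs; on PowerPC it uses flush_dcache_range();
 * other architectures are unsupported and only trigger a warning.
 */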
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                drm_cache_flush_clflush(pages, num_pages);
                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
        unsigned long i;
        for (i = 0; i < num_pages; i++) {
                struct page *page = pages[i];
                void *page_virtual;

                if (unlikely(page == NULL))
                        continue;

                page_virtual = kmap_atomic(page);
                flush_dcache_range((unsigned long)page_virtual,
                                   (unsigned long)page_virtual + PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);

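/**
 * drm_clflush_sg - Flush CPU data cache lines for a scatter/gather table.
 * @st: sg_table whose backing pages are to be flushed.
 *
 * Flush every data cache line entry that points to an address covered by
 * the pages of the scatter/gather table. On x86 this flushes by cache
 * line when X86_FEATURE_CLFLUSH is present and otherwise falls back to
 * wbinvd on all CPUs; other architectures are unsupported and only
 * trigger a warning.
 */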
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                struct sg_page_iter sg_iter;

                mb();
                for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
                        drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb();

                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);

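/**
 * drm_clflush_virt_range - Flush CPU data cache lines for a virtual range.
 * @addr: Start of the kernel virtual address range.
 * @length: Length of the range in bytes.
 *
 * Flush every data cache line entry that points to an address in the
 * requested region. On x86 the start address is rounded down to a cache
 * line boundary and the range is flushed by cache line when
 * X86_FEATURE_CLFLUSH is present, otherwise falling back to wbinvd on
 * all CPUs; other architectures are unsupported and only trigger a
 * warning.
 */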
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                const int size = boot_cpu_data.x86_clflush_size;
                void *end = addr + length;

                addr = (void *)(((unsigned long)addr) & -size);
                mb();
                for (; addr < end; addr += size)
                        clflushopt(addr);
                clflushopt(end - 1); /* force serialisation */
                mb();
                return;
        }

        if (wbinvd_on_all_cpus())
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
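/*
 * Usage sketch (illustrative only, not part of the upstream file): a driver
 * that has written to pages through the CPU and is about to hand them to a
 * device that does not snoop the CPU caches could flush them first, e.g.
 *
 *	drm_clflush_pages(bo_pages, bo_num_pages);
 *
 * or, for a contiguous kernel virtual mapping,
 *
 *	drm_clflush_virt_range(vaddr, size);
 *
 * where bo_pages, bo_num_pages, vaddr and size stand in for the driver's
 * own bookkeeping and are named here purely for illustration.
 */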