linux/include/drm/drm_backport.h
/*
 * Copyright (C) 2013 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#ifndef DRM_BACKPORT_H_
#define DRM_BACKPORT_H_

#include <linux/hrtimer.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/console.h>


#include <linux/time64.h>
static inline time64_t ktime_get_real_seconds(void)
{
        return get_seconds();
}

/**
 * ktime_mono_to_real - Convert monotonic time to clock realtime
 */
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
        return ktime_sub(mono, ktime_get_monotonic_offset());
}
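
/*
 * Usage sketch (illustrative, not part of the backport): convert a
 * CLOCK_MONOTONIC timestamp, e.g. one taken with ktime_get(), to
 * CLOCK_REALTIME:
 *
 *      ktime_t mono = ktime_get();
 *      ktime_t real = ktime_mono_to_real(mono);
 */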

static inline void get_monotonic_boottime64(struct timespec64 *ts)
{
        *ts = ktime_to_timespec64(ktime_get_boottime());
}

/*
 *
 */

/**
 * list_last_entry - get the last element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that the list is expected to be non-empty.
 */
#define list_last_entry(ptr, type, member) \
        list_entry((ptr)->prev, type, member)
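
/*
 * Usage sketch (illustrative, not part of the backport): for a hypothetical
 * 'struct foo' linked into 'foo_list' through a 'node' member:
 *
 *      struct foo *last = list_last_entry(&foo_list, struct foo, node);
 */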


#define module_param_named_unsafe(name, value, type, perm)              \
        module_param_named(name, value, type, perm)
#define module_param_unsafe(name, type, perm)                   \
        module_param(name, type, perm)

/*
 *
 */

#include <linux/mm.h>

#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call the @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker2 {
        unsigned long (*count_objects)(struct shrinker2 *,
                                       struct shrink_control *sc);
        unsigned long (*scan_objects)(struct shrinker2 *,
                                      struct shrink_control *sc);

        int seeks;      /* seeks to recreate an obj */
        long batch;     /* reclaim batch size, 0 = default */
        unsigned long flags;

        /* These are for internal use */
        struct list_head list;
        /* objs pending delete, per node */
        atomic_long_t *nr_deferred;

        /* compat: */
        struct shrinker compat;
};
int register_shrinker2(struct shrinker2 *shrinker);
void unregister_shrinker2(struct shrinker2 *shrinker);

#define shrinker            shrinker2
#define register_shrinker   register_shrinker2
#define unregister_shrinker unregister_shrinker2
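
/*
 * Usage sketch (illustrative, not part of the backport): with the compat
 * defines above, driver code written against the upstream count/scan
 * shrinker interface builds unchanged. Hypothetical example, where
 * foo_nr_cached and foo_evict() stand in for a driver's own cache state:
 *
 *      static unsigned long foo_count(struct shrinker *s,
 *                                     struct shrink_control *sc)
 *      {
 *              return foo_nr_cached;   (0 if nothing is freeable)
 *      }
 *
 *      static unsigned long foo_scan(struct shrinker *s,
 *                                    struct shrink_control *sc)
 *      {
 *              return foo_evict(sc->nr_to_scan);   (or SHRINK_STOP)
 *      }
 *
 *      static struct shrinker foo_shrinker = {
 *              .count_objects = foo_count,
 *              .scan_objects  = foo_scan,
 *              .seeks         = DEFAULT_SEEKS,
 *      };
 *
 *      register_shrinker(&foo_shrinker);
 */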

/*
 *
 */

extern struct workqueue_struct *system_power_efficient_wq;


/*
 *
 */

#include <linux/rculist.h>

/* stubs, we don't have mipi-dsi.. */
struct mipi_dsi_device;
struct mipi_dsi_packet;
struct mipi_dsi_msg;
static inline ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
                                  const void *data, size_t len)
{
        return -EINVAL;
}

static inline ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
                               size_t size)
{
        return -EINVAL;
}

static inline int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
                           const struct mipi_dsi_msg *msg)
{
        return -EINVAL;
}

static inline int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
        return -ENOSYS;
}

#define cpu_relax_lowlatency() cpu_relax()
#define pagefault_disabled()   in_atomic()

static inline int arch_phys_wc_index(int handle)
{
#ifdef CONFIG_X86
        int phys_wc_to_mtrr_index(int handle);
        return phys_wc_to_mtrr_index(handle);
#else
        return -1;
#endif
}

/*
 * avoiding/emulating 87521e16a7abbf3fa337f56cb4d1e18247f15e8a upstream:
 */

enum acpi_backlight_type {
        acpi_backlight_undef = -1,
        acpi_backlight_none = 0,
        acpi_backlight_video,
        acpi_backlight_vendor,
        acpi_backlight_native,
};

static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
        int acpi_video_backlight_support(void);
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
        bool acpi_video_verify_backlight_support(void);
        if (acpi_video_backlight_support() &&
                        !acpi_video_verify_backlight_support())
                return acpi_backlight_native;
#else
        if (acpi_video_backlight_support())
                return acpi_backlight_native;
#endif
        return acpi_backlight_undef;
}

static inline bool apple_gmux_present(void) { return false; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed                cmpxchg
#define  cmpxchg_acquire                cmpxchg
#define  cmpxchg_release                cmpxchg
#endif

static inline int register_vmap_purge_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
        return 0;
}

enum mutex_trylock_recursive_enum {
        MUTEX_TRYLOCK_FAILED    = 0,
        MUTEX_TRYLOCK_SUCCESS   = 1,
        MUTEX_TRYLOCK_RECURSIVE,
};
static inline bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

static inline __deprecated __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
        /* BACKPORT NOTE:
         * Different from upstream to avoid backporting
         * 3ca0ff571b092ee4d807f1168caa428d95b0173b, but functionally
         * equivalent for i915 to previous behavior
         */
        if (unlikely(mutex_is_locked_by(lock, current)))
                return MUTEX_TRYLOCK_RECURSIVE;

        return mutex_trylock(lock);
}
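
/*
 * Usage sketch (illustrative, not part of the backport): callers must
 * handle all three return values; only MUTEX_TRYLOCK_SUCCESS acquired the
 * lock and therefore owes an unlock (dev->struct_mutex and 'unlock' below
 * are hypothetical):
 *
 *      switch (mutex_trylock_recursive(&dev->struct_mutex)) {
 *      case MUTEX_TRYLOCK_FAILED:
 *              return -EBUSY;
 *      case MUTEX_TRYLOCK_SUCCESS:
 *              unlock = true;
 *              break;
 *      case MUTEX_TRYLOCK_RECURSIVE:
 *              unlock = false;   (already held by this task)
 *              break;
 *      }
 */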


/*
 * On x86 PAT systems we have memory tracking that keeps track of
 * the allowed mappings on memory ranges. This tracking works for
 * all the in-kernel mapping APIs (ioremap*), but where the user
 * wishes to map a range from a physical device into user memory
 * the tracking won't be updated. This API is to be used by
 * drivers which remap physical device pages into userspace,
 * and want to make sure they are mapped WC and not UC.
 *
 * BACKPORT NOTES:  If:
 *
 *   87744ab3832b mm: fix cache mode tracking in vm_insert_mixed()
 *
 * gets backported, then we want to backport:
 *
 *   8ef4227615e1 x86/io: add interface to reserve io memtype for a resource range. (v1.1)
 *
 * and drop these next two stubs.
 */
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
                                             resource_size_t size)
{
        return 0;
}

static inline void arch_io_free_memtype_wc(resource_size_t base,
                                           resource_size_t size)
{
}
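
/*
 * Usage sketch (illustrative, not part of the backport): upstream, drivers
 * pair these calls around the lifetime of an I/O aperture (bar_base and
 * bar_size are hypothetical):
 *
 *      arch_io_reserve_memtype_wc(bar_base, bar_size);
 *      ...
 *      arch_io_free_memtype_wc(bar_base, bar_size);
 */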


static inline int __must_check down_write_killable(struct rw_semaphore *sem)
{
        down_write(sem);
        return 0;
}


static inline long __drm_get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages, int write,
                int force, struct page **pages, struct vm_area_struct **vmas)
{
        return get_user_pages(tsk, mm, start, nr_pages, write, force, pages, vmas);
}

#define get_user_pages_remote(c, mm, start, nr_pages, write, pages, vmas, locked) \
                __drm_get_user_pages(c, mm, start, nr_pages, write, 0, pages, vmas)
#define get_user_pages(start, nr_pages, write, pages, vmas) \
        __drm_get_user_pages(current, current->mm, start, nr_pages, write, 0, pages, vmas)
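
/*
 * Usage sketch (illustrative, not part of the backport): with the defines
 * above, a caller written against the newer 5-argument convention, e.g.
 *
 *      pinned = get_user_pages(addr, npages, 1, pages, NULL);
 *
 * expands to the old 8-argument call on current/current->mm with
 * force = 0 (addr, npages, pages are hypothetical).
 */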


#define smp_store_mb(var, value)        set_mb(var, value)

#ifndef atomic_set_release
#define  atomic_set_release(v, i)       smp_store_release(&(v)->counter, (i))
#endif

#ifdef CONFIG_X86
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
        atomic_and(~i, v);
}
#endif
#endif

/* drm_panel stubs to make i915 happy.. I don't think we support any
 * hardware using DSI, and the panel code, without some work, will be
 * unhappy on Power or anything else w/ CONFIG_OF..
 */
struct drm_panel;
struct drm_connector;
static inline void drm_panel_init(struct drm_panel *panel) {}
static inline int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
        return -ENXIO;
}
static inline int drm_panel_detach(struct drm_panel *panel)
{
        return 0;
}
static inline int drm_panel_add(struct drm_panel *panel)
{
        return -ENXIO;
}
static inline void drm_panel_remove(struct drm_panel *panel) {}

typedef wait_queue_t wait_queue_entry_t;
#define __add_wait_queue_entry_tail __add_wait_queue_tail

unsigned int swiotlb_max_size(void);
#define swiotlb_max_segment swiotlb_max_size

#define SLAB_TYPESAFE_BY_RCU SLAB_DESTROY_BY_RCU

#include <linux/fs.h>

static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
{
        return file->f_op->mmap(file, vma);
}

static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

/*
 * since we just use get_user()/put_user() for unsafe_put_user()
 * and unsafe_get_user(), user_access_begin()/user_access_end()
 * can be no-ops
 */
#define user_access_begin() do {} while (0)
#define user_access_end()   do {} while (0)

#define unsafe_put_user(x, ptr, err_label)      \
do {                                            \
        int __pu_err = put_user(x, ptr);        \
        if (unlikely(__pu_err)) goto err_label; \
} while (0)

#define unsafe_get_user(x, ptr, err_label)      \
do {                                            \
        int __gu_err = get_user(x, ptr);        \
        if (unlikely(__gu_err)) goto err_label; \
} while (0)
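
/*
 * Usage sketch (illustrative, not part of the backport): the unsafe
 * accessors are bracketed by user_access_begin()/user_access_end() and
 * jump to a local label on fault (uptr, val and efault are hypothetical):
 *
 *      user_access_begin();
 *      unsafe_put_user(val, &uptr->field, efault);
 *      user_access_end();
 *      return 0;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */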

/*
 * We don't have the commits in the RHEL7 kernel which necessitate
 * this flag, so it is just zero.  Define it as an enum so if someone
 * does backport the pci/pm patches, it won't go unnoticed that this
 * needs to be removed.  See bac2a909a096c9110525c18cbb8ce73c660d5f71
 * and 4d071c3238987325b9e50e33051a40d1cce311cc upstream.
 */
enum {
        PCI_DEV_FLAGS_NEEDS_RESUME = 0,
};

int __init drm_backport_init(void);
void __exit drm_backport_exit(void);

#undef pr_fmt

#endif /* DRM_BACKPORT_H_ */