1
2
3
4
5
6
7
8
9#ifndef DRM_BACKPORT_H_
10#define DRM_BACKPORT_H_
11
12#include <linux/hrtimer.h>
13#include <linux/err.h>
14#include <linux/io.h>
15#include <linux/console.h>
16
17
18#include <linux/time64.h>
19static inline time64_t ktime_get_real_seconds(void)
20{
21 return get_seconds();
22}
23
24
25
26
27static inline ktime_t ktime_mono_to_real(ktime_t mono)
28{
29 return ktime_sub(mono, ktime_get_monotonic_offset());
30}
31
32static inline void get_monotonic_boottime64(struct timespec64 *ts)
33{
34 *ts = ktime_to_timespec64(ktime_get_boottime());
35}
36
37
38
39
40
41
42
43
44
45
46
47
48
/* Backport of list_last_entry(): the last element of a non-empty list. */
#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/*
 * This kernel has no "unsafe" (taint-on-write) module parameter helpers;
 * fall back to the plain variants, losing only the tainting behaviour.
 */
#define module_param_named_unsafe(name, value, type, perm) \
	module_param_named(name, value, type, perm)
#define module_param_unsafe(name, type, perm) \
	module_param(name, type, perm)
57
58
59
60
61
62#include <linux/mm.h>
63
64#define SHRINK_STOP (~0UL)
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Backport of the newer split-callback shrinker API (count_objects /
 * scan_objects) on top of this kernel's single-callback struct shrinker.
 * The #defines below redirect the standard names, so code written against
 * the new API compiles unchanged; register_shrinker2() (defined in the
 * backport .c — not visible here) wires ->compat into the old API.
 */
struct shrinker2 {
	/* Return the number of freeable objects (0 if none). */
	unsigned long (*count_objects)(struct shrinker2 *,
				       struct shrink_control *sc);
	/* Free up to sc->nr_to_scan objects; may return SHRINK_STOP. */
	unsigned long (*scan_objects)(struct shrinker2 *,
				      struct shrink_control *sc);

	int seeks;		/* seeks to recreate an object */
	long batch;		/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* Bookkeeping for the shim (NOTE(review): presumably managed by
	 * register_shrinker2() — confirm against the .c file). */
	struct list_head list;

	atomic_long_t *nr_deferred;

	/* Old-API shrinker actually registered with this kernel. */
	struct shrinker compat;
};
int register_shrinker2(struct shrinker2 *shrinker);
void unregister_shrinker2(struct shrinker2 *shrinker);

/* Redirect the standard names onto the shim. */
#define shrinker shrinker2
#define register_shrinker register_shrinker2
#define unregister_shrinker unregister_shrinker2
109
110
111
112
113
114extern struct workqueue_struct *system_power_efficient_wq;
115
116
117
118
119
120
121#include <linux/rculist.h>
122
123
/*
 * MIPI DSI support is absent on this kernel.  Provide stubs so callers
 * compile; every operation reports failure (-EINVAL for transfers and
 * packet construction, -ENOSYS for attach).
 */
struct mipi_dsi_device;
struct mipi_dsi_packet;
struct mipi_dsi_msg;

static inline ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
						const void *data, size_t len)
{
	/* No DSI host: reject every DCS write. */
	return -EINVAL;
}

static inline ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi,
					     const void *payload, size_t size)
{
	/* No DSI host: reject every generic write. */
	return -EINVAL;
}

static inline int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
					 const struct mipi_dsi_msg *msg)
{
	/* Cannot build packets without DSI support. */
	return -EINVAL;
}

static inline int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
	/* Attaching is not implemented at all on this kernel. */
	return -ENOSYS;
}
149
/* No low-latency variant here; plain cpu_relax() is the closest match. */
#define cpu_relax_lowlatency() cpu_relax()
/* NOTE(review): approximates pagefault_disabled() via the atomic context
 * check — assumes pagefault_disable() bumps the preempt count; confirm. */
#define pagefault_disabled() in_atomic()
152
/*
 * Backport of arch_phys_wc_index(): map a handle from arch_phys_wc_add()
 * to the underlying MTRR index, or -1 when there is no such mapping.
 */
static inline int arch_phys_wc_index(int handle)
{
#ifdef CONFIG_X86
	/* The symbol exists on x86 but older headers lack a prototype,
	 * so declare it locally. */
	int phys_wc_to_mtrr_index(int handle);
	return phys_wc_to_mtrr_index(handle);
#else
	return -1;
#endif
}
162
163
164
165
166
/* Backport of the ACPI backlight type enumeration. */
enum acpi_backlight_type {
	acpi_backlight_undef = -1,	/* could not determine a type */
	acpi_backlight_none = 0,
	acpi_backlight_video,
	acpi_backlight_vendor,
	acpi_backlight_native,
};
174
/*
 * Backport of acpi_video_get_backlight_type() built from the older
 * acpi_video_backlight_support() probes.  Only distinguishes "native"
 * from "undefined"; it never reports video/vendor types.
 */
static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
	/* Older headers lack this prototype; declare it locally. */
	int acpi_video_backlight_support(void);
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
	bool acpi_video_verify_backlight_support(void);
	/* NOTE(review): ACPI says backlight control exists but the video
	 * driver does not claim it — treat as native GPU control. */
	if (acpi_video_backlight_support() &&
	    !acpi_video_verify_backlight_support())
		return acpi_backlight_native;
#else
	/* Without CONFIG_ACPI_VIDEO nothing else can claim it. */
	if (acpi_video_backlight_support())
		return acpi_backlight_native;
#endif
	return acpi_backlight_undef;
}
189
/* No apple-gmux driver on this kernel: report the mux as absent. */
static inline bool apple_gmux_present(void)
{
	return false;
}

/* No switcheroo probe deferral support: never defer a client probe. */
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
{
	return false;
}
192
193
/* Relaxed/acquire/release cmpxchg variants are missing here; the fully
 * ordered cmpxchg is a correct (if stronger than required) substitute. */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg
#endif
199
/* vmap purge notifiers do not exist on this kernel; registration is a
 * successful no-op, so callers simply never get notified. */
static inline int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return 0;
}

/* Matching no-op for unregistration. */
static inline int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return 0;
}
209
/* Result of mutex_trylock_recursive().  FAILED/SUCCESS mirror the 0/1
 * returns of mutex_trylock() so the int result converts directly. */
enum mutex_trylock_recursive_enum {
	MUTEX_TRYLOCK_FAILED = 0,
	MUTEX_TRYLOCK_SUCCESS = 1,
	MUTEX_TRYLOCK_RECURSIVE,
};
215
216static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
217{
218 if (!mutex_is_locked(mutex))
219 return false;
220
221#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
222 return mutex->owner == task;
223#else
224
225 return false;
226#endif
227}
228
/*
 * Try to acquire @lock, detecting recursion by the current task.
 *
 * Returns MUTEX_TRYLOCK_RECURSIVE when current already holds the lock;
 * otherwise the int result of mutex_trylock() converts to
 * MUTEX_TRYLOCK_SUCCESS (1) or MUTEX_TRYLOCK_FAILED (0).
 *
 * Deprecated: attempting recursive locking usually signals a broken
 * locking design; callers should be restructured instead.
 */
static inline __deprecated __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(mutex_is_locked_by(lock, current)))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263static inline int arch_io_reserve_memtype_wc(resource_size_t base,
264 resource_size_t size)
265{
266 return 0;
267}
268
269static inline void arch_io_free_memtype_wc(resource_size_t base,
270 resource_size_t size)
271{
272}
273
274
275static inline int __must_check down_write_killable(struct rw_semaphore *sem)
276{
277 down_write(sem);
278 return 0;
279}
280
281
/*
 * Adapt the old 8-argument get_user_pages() to the newer calling
 * conventions.  Order matters: this helper must call the real
 * get_user_pages() BEFORE the #define below rewrites that name.
 */
static inline long __drm_get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return get_user_pages(tsk, mm, start, nr_pages, write, force, pages, vmas);
}

/* 'locked' is accepted for source compatibility but ignored; 'force' is
 * always 0 in both wrappers. */
#define get_user_pages_remote(c, mm, start, nr_pages, write, pages, vmas, locked) \
	__drm_get_user_pages(c, mm, start, nr_pages, write, 0, pages, vmas)
#define get_user_pages(start, nr_pages, write, pages, vmas) \
	__drm_get_user_pages(current, current->mm, start, nr_pages, write, 0, pages, vmas)
293
294
/* smp_store_mb() is the newer name for set_mb(). */
#define smp_store_mb(var, value) set_mb(var, value)

#ifndef atomic_set_release
/* Release-ordered atomic set, built from a plain release store. */
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif

#ifdef CONFIG_X86
#ifndef atomic_andnot
/* Backport of atomic_andnot(): atomically clear the bits of @i in @v.
 * NOTE(review): only provided on x86 here — presumably other arches
 * already have it or lack atomic_and(); confirm. */
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif
#endif
309
310
311
312
313
/*
 * drm_panel support is absent on this kernel.  Provide no-op stubs:
 * attach/add report -ENXIO so callers treat panels as missing, while
 * init/detach/remove succeed silently.
 */
struct drm_panel;
struct drm_connector;

static inline void drm_panel_init(struct drm_panel *panel)
{
}

static inline int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
	return -ENXIO;
}

static inline int drm_panel_detach(struct drm_panel *panel)
{
	return 0;
}

static inline int drm_panel_add(struct drm_panel *panel)
{
	return -ENXIO;
}

static inline void drm_panel_remove(struct drm_panel *panel)
{
}
330
/* Newer kernels renamed wait_queue_t to wait_queue_entry_t (and the
 * matching enqueue helper); alias the old names. */
typedef wait_queue_t wait_queue_entry_t;
#define __add_wait_queue_entry_tail __add_wait_queue_tail

/* swiotlb_max_segment() under its earlier name; implemented by the
 * backport module (NOTE(review): confirm against the .c file). */
unsigned int swiotlb_max_size(void);
#define swiotlb_max_segment swiotlb_max_size

/* SLAB_TYPESAFE_BY_RCU is the newer name for SLAB_DESTROY_BY_RCU. */
#define SLAB_TYPESAFE_BY_RCU SLAB_DESTROY_BY_RCU
338
339#include <linux/fs.h>
340
341static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
342{
343 return file->f_op->mmap(file, vma);
344}
345
346static inline void mmgrab(struct mm_struct *mm)
347{
348 atomic_inc(&mm->mm_count);
349}
350
351
352
353
354
/* No user-access fence instructions on this kernel: begin/end are no-ops,
 * and the "unsafe" accessors fall back to the checked put/get_user. */
#define user_access_begin() do {} while (0)
#define user_access_end() do {} while (0)

/* On failure, jumps to @err_label (matching the upstream contract). */
#define unsafe_put_user(x, ptr, err_label) \
do { \
	int __pu_err = put_user(x, ptr); \
	if (unlikely(__pu_err)) goto err_label; \
} while (0)

#define unsafe_get_user(x, ptr, err_label) \
do { \
	int __gu_err = get_user(x, ptr); \
	if (unlikely(__gu_err)) goto err_label; \
} while (0)
369
370
371
372
373
374
375
376
/* PCI_DEV_FLAGS_NEEDS_RESUME is unknown to this kernel; define it as 0 so
 * OR-ing it into dev_flags is a harmless no-op. */
enum {
	PCI_DEV_FLAGS_NEEDS_RESUME = 0,
};

/* Set-up/tear-down hooks for the backport glue (defined in the backport
 * .c — not visible in this header). */
int __init drm_backport_init(void);
void __exit drm_backport_exit(void);
383
384#undef pr_fmt
385
386#endif
387