/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

/*
 * Move pages to the appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	struct pagevec pvec;
	gfp_t noreclaim;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (page_count > totalram_pages())
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker.
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * we give up trying to self-tune this device... for
			 * now.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		/* Coalesce physically contiguous pages into one sg entry. */
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&i915->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	sg_free_table(st);
	kfree(st);

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the
	 * usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}
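
/*
 * Attempt to write the object's dirty backing pages back via the backing
 * filesystem (note wbc.for_reclaim below), mimicking reclaim, so that an
 * idle object's memory can be paged out rather than pinned in RAM.
 */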
static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmapings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmapings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_entry(mapping, i);
		if (!page || xa_is_value(page))
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}
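
/*
 * Common release path for shmem-backed objects: drop the dirty hint for
 * DONTNEED objects, clflush if the pages may be stale in the CPU cache,
 * and return the object to the CPU write domain.
 */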
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}
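
/*
 * Unwind shmem_get_pages: finish the DMA/GTT mapping, save the bit17
 * swizzle state if needed, propagate the dirty/accessed hints to the VM,
 * then release each backing page back to shmemfs.
 */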
static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
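
/*
 * The vfuncs backing shmem (system memory) objects: how the object obtains
 * and releases its pages, how it is truncated or written back under memory
 * pressure, and how a user pwrite is serviced.
 */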
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
};

static int create_shmem(struct drm_i915_private *i915,
			struct drm_gem_object *obj,
			size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
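
/*
 * Allocate a new GEM object backed by shmemfs (or the private gemfs mount,
 * when available). The backing pages themselves are allocated lazily, on
 * first call to shmem_get_pages().
 */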
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	/*
	 * There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Let's
	 * document this and catch if we ever need to fix it. In the
	 * meantime, if you do spot such a local variable, please consider
	 * fixing!
	 */
	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = create_shmem(i915, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the
		 * CPU cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}