1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#define pr_fmt(fmt) "[TTM] " fmt
34
35#include <drm/ttm/ttm_module.h>
36#include <drm/ttm/ttm_bo_driver.h>
37#include <drm/ttm/ttm_page_alloc.h>
38#include <drm/ttm/ttm_placement.h>
39#include <linux/agp_backend.h>
40#include <linux/module.h>
41#include <linux/slab.h>
42#include <linux/io.h>
43#include <asm/agp.h>
44
/*
 * Per-buffer AGP backing store.  The base struct ttm_tt is embedded so the
 * TTM callbacks below can recover the container with container_of().
 */
struct ttm_agp_backend {
	struct ttm_tt ttm;		/* embedded base object handed back to TTM */
	struct agp_memory *mem;		/* AGP allocation; NULL while unbound/freed */
	struct agp_bridge_data *bridge;	/* bridge used for allocate/bind calls */
};
50
51int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
52{
53 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
54 struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
55 struct drm_mm_node *node = bo_mem->mm_node;
56 struct agp_memory *mem;
57 int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
58 unsigned i;
59
60 if (agp_be->mem)
61 return 0;
62
63 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
64 if (unlikely(mem == NULL))
65 return -ENOMEM;
66
67 mem->page_count = 0;
68 for (i = 0; i < ttm->num_pages; i++) {
69 struct page *page = ttm->pages[i];
70
71 if (!page)
72 page = dummy_read_page;
73
74 mem->pages[mem->page_count++] = page;
75 }
76 agp_be->mem = mem;
77
78 mem->is_flushed = 1;
79 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
80
81 ret = agp_bind_memory(mem, node->start);
82 if (ret)
83 pr_err("AGP Bind memory failed\n");
84
85 return ret;
86}
87EXPORT_SYMBOL(ttm_agp_bind);
88
89void ttm_agp_unbind(struct ttm_tt *ttm)
90{
91 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
92
93 if (agp_be->mem) {
94 if (agp_be->mem->is_bound) {
95 agp_unbind_memory(agp_be->mem);
96 return;
97 }
98 agp_free_memory(agp_be->mem);
99 agp_be->mem = NULL;
100 }
101}
102EXPORT_SYMBOL(ttm_agp_unbind);
103
104bool ttm_agp_is_bound(struct ttm_tt *ttm)
105{
106 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
107
108 if (!ttm)
109 return false;
110
111 return (agp_be->mem != NULL);
112}
113EXPORT_SYMBOL(ttm_agp_is_bound);
114
115void ttm_agp_destroy(struct ttm_tt *ttm)
116{
117 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
118
119 if (agp_be->mem)
120 ttm_agp_unbind(ttm);
121 ttm_tt_fini(ttm);
122 kfree(agp_be);
123}
124EXPORT_SYMBOL(ttm_agp_destroy);
125
126struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
127 struct agp_bridge_data *bridge,
128 uint32_t page_flags)
129{
130 struct ttm_agp_backend *agp_be;
131
132 agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
133 if (!agp_be)
134 return NULL;
135
136 agp_be->mem = NULL;
137 agp_be->bridge = bridge;
138
139 if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
140 kfree(agp_be);
141 return NULL;
142 }
143
144 return &agp_be->ttm;
145}
146EXPORT_SYMBOL(ttm_agp_tt_create);
147