/*
 * SPU file system -- SPU context management
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
#include "sputrace.h"
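
/* number of SPU contexts currently allocated in the system */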
atomic_t nr_spu_contexts = ATOMIC_INIT(0);

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
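	/* Binding to a physical processor is deferred
	 * until the context is first loaded.
	 */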
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ctx->stats.tstamp = ktime_get_ns();

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
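
/*
 * kref release callback: tears the context down once the last reference
 * has been dropped (see put_spu_context()).
 */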
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx->switch_log);
	kfree(ctx);
}

struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}
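
/* give up the mm reference when the context is about to be destroyed */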
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;
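
	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */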
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}
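
/*
 * Remove all user-space mappings of the context's register areas and
 * local store so that later faults re-establish them against the
 * context's current backing state.
 */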
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
	mutex_unlock(&ctx->mapping_lock);
}
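
/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx:	spu context to lock
 */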
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}
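
/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx:	context to unlock
 */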
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
			test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}