/*
 * Tegra host1x Syncpoints
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

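/* Find a free wait base, mark it as requested and hand it out. */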
static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}

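/*
 * Allocate a free syncpoint for a client device, optionally together with a
 * wait base, and give it a human-readable name for debugging.
 */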
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
						 struct device *dev,
						 unsigned long flags)
{
	int i;
	struct host1x_syncpt *sp = host->syncpt;
	char *name;

	mutex_lock(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
			 dev ? dev_name(dev) : NULL);
	if (!name)
		goto free_base;

	sp->dev = dev;
	sp->name = name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}

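/* Return the ID of the given syncpoint. */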
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/*
 * Update and return the cached maximum value, i.e. the value the syncpoint
 * will reach once all submitted increments have executed.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
		host1x_hw_syncpt_restore(host, sp_base + i);

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them from the
 * hardware registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Update the cached syncpoint value by reading a new value from the hardware
 * register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base value.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/*
 * Increment the syncpoint value from the CPU, updating the cache.
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);

/*
 * Update the syncpoint value from hardware and return true if the threshold
 * has been reached, false if the caller may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	host1x_hw_syncpt_load(sp->host, sp);

	return host1x_syncpt_is_expired(sp, thresh);
}

/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check the cached value */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);

		return 0;
	}

	/* try to read the current value from the register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;

		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;

	/* a negative timeout means wait indefinitely; bound it by LONG_MAX */
	if (timeout < 0)
		timeout = LONG_MAX;

	/* wait for the syncpoint, a timeout or a signal */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain;

		remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);

			err = 0;

			break;
		}

		if (remain < 0) {
			err = remain;
			break;
		}

		timeout -= check;

		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);

			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);

			check_count++;
		}
	}

	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);

/*
 * Returns true if the syncpoint threshold has expired, false if the caller
 * may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/*
	 * Note the use of unsigned arithmetic here (mod 1 << 32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh                = the value being checked against.
	 * f = future_val = max_val  = the value c will reach once all
	 *                             outstanding increments have executed.
	 *
	 * c always chases f until it reaches f, so for host-managed
	 * syncpoints a threshold is expired unless it lies in the pending
	 * window (c, f]:
	 *
	 *   (f - t) >= (c - t)  implies  expired (return true)
	 *   (f - t) <  (c - t)  implies  still pending (return false)
	 *
	 * In particular, a threshold beyond f can never be reached by the
	 * work submitted so far, so it is reported as expired rather than
	 * waited on forever.
	 *
	 * For client-managed syncpoints the driver does not track max_val,
	 * so fall back to a signed comparison against the current value.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}

/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}

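/*
 * Allocate and initialize the per-host syncpoint and wait base bookkeeping,
 * restore hardware state and reserve one syncpoint for internal use.
 */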
int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	host1x_syncpt_restore(host);

	/* allocate a syncpoint for use in clearing waits on expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
	if (!host->nop_sp)
		return -ENOMEM;

	return 0;
}

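/*
 * Request a syncpoint on behalf of a client device whose parent is the
 * host1x instance.
 */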
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(dev->parent);

	return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);

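/*
 * Release a previously requested syncpoint so that it can be handed out
 * again; any associated wait base and name are freed as well.
 */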
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	mutex_lock(&sp->host->syncpt_mutex);

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->dev = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);

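/* Free the names of all syncpoints at host1x teardown. */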
void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/*
 * Read the cached maximum value, i.e. the value the syncpoint will reach
 * once all outstanding increments (queued in a channel or in a software
 * thread) have executed.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);

/*
 * Read the cached minimum value, a shadow of the current syncpoint value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

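/* Read the current syncpoint value from hardware, updating the cache. */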
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

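/* Accessors for the number of syncpoints, wait bases and mlocks. */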
unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

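/* Look up a syncpoint by ID; returns NULL if the ID is out of range. */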
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);