1
2
3
4
5
6
7
8#include "xfs.h"
9#include "xfs_fs.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_shared.h"
13#include "xfs_trans_resv.h"
14#include "xfs_mount.h"
15#include "xfs_alloc.h"
16#include "xfs_extent_busy.h"
17#include "xfs_trace.h"
18#include "xfs_trans.h"
19#include "xfs_log.h"
20#include "xfs_ag.h"
21
22void
23xfs_extent_busy_insert(
24 struct xfs_trans *tp,
25 struct xfs_perag *pag,
26 xfs_agblock_t bno,
27 xfs_extlen_t len,
28 unsigned int flags)
29{
30 struct xfs_extent_busy *new;
31 struct xfs_extent_busy *busyp;
32 struct rb_node **rbp;
33 struct rb_node *parent = NULL;
34
35 new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
36 new->agno = pag->pag_agno;
37 new->bno = bno;
38 new->length = len;
39 INIT_LIST_HEAD(&new->list);
40 new->flags = flags;
41
42
43 trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);
44
45 spin_lock(&pag->pagb_lock);
46 rbp = &pag->pagb_tree.rb_node;
47 while (*rbp) {
48 parent = *rbp;
49 busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
50
51 if (new->bno < busyp->bno) {
52 rbp = &(*rbp)->rb_left;
53 ASSERT(new->bno + new->length <= busyp->bno);
54 } else if (new->bno > busyp->bno) {
55 rbp = &(*rbp)->rb_right;
56 ASSERT(bno >= busyp->bno + busyp->length);
57 } else {
58 ASSERT(0);
59 }
60 }
61
62 rb_link_node(&new->rb_node, parent, rbp);
63 rb_insert_color(&new->rb_node, &pag->pagb_tree);
64
65 list_add(&new->list, &tp->t_busy);
66 spin_unlock(&pag->pagb_lock);
67}
68
69
70
71
72
73
74
75
76
77
78int
79xfs_extent_busy_search(
80 struct xfs_mount *mp,
81 struct xfs_perag *pag,
82 xfs_agblock_t bno,
83 xfs_extlen_t len)
84{
85 struct rb_node *rbp;
86 struct xfs_extent_busy *busyp;
87 int match = 0;
88
89
90 spin_lock(&pag->pagb_lock);
91 rbp = pag->pagb_tree.rb_node;
92 while (rbp) {
93 busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
94 if (bno < busyp->bno) {
95
96 if (bno + len > busyp->bno)
97 match = -1;
98 rbp = rbp->rb_left;
99 } else if (bno > busyp->bno) {
100
101 if (bno < busyp->bno + busyp->length)
102 match = -1;
103 rbp = rbp->rb_right;
104 } else {
105
106 match = (busyp->length == len) ? 1 : -1;
107 break;
108 }
109 }
110 spin_unlock(&pag->pagb_lock);
111 return match;
112}
113
114
115
116
117
118
119
120
121
122
123
124
125STATIC bool
126xfs_extent_busy_update_extent(
127 struct xfs_mount *mp,
128 struct xfs_perag *pag,
129 struct xfs_extent_busy *busyp,
130 xfs_agblock_t fbno,
131 xfs_extlen_t flen,
132 bool userdata) __releases(&pag->pagb_lock)
133 __acquires(&pag->pagb_lock)
134{
135 xfs_agblock_t fend = fbno + flen;
136 xfs_agblock_t bbno = busyp->bno;
137 xfs_agblock_t bend = bbno + busyp->length;
138
139
140
141
142
143
144 if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
145 spin_unlock(&pag->pagb_lock);
146 delay(1);
147 spin_lock(&pag->pagb_lock);
148 return false;
149 }
150
151
152
153
154
155
156
157
158
159 if (userdata)
160 goto out_force_log;
161
162 if (bbno < fbno && bend > fend) {
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180 goto out_force_log;
181 } else if (bbno >= fbno && bend <= fend) {
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220 rb_erase(&busyp->rb_node, &pag->pagb_tree);
221 busyp->length = 0;
222 return false;
223 } else if (fend < bend) {
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238 busyp->bno = fend;
239 } else if (bbno < fbno) {
240
241
242
243
244
245
246
247
248
249
250
251
252
253 busyp->length = fbno - busyp->bno;
254 } else {
255 ASSERT(0);
256 }
257
258 trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
259 return true;
260
261out_force_log:
262 spin_unlock(&pag->pagb_lock);
263 xfs_log_force(mp, XFS_LOG_SYNC);
264 trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
265 spin_lock(&pag->pagb_lock);
266 return false;
267}
268
269
270
271
272
273void
274xfs_extent_busy_reuse(
275 struct xfs_mount *mp,
276 struct xfs_perag *pag,
277 xfs_agblock_t fbno,
278 xfs_extlen_t flen,
279 bool userdata)
280{
281 struct rb_node *rbp;
282
283 ASSERT(flen > 0);
284 spin_lock(&pag->pagb_lock);
285restart:
286 rbp = pag->pagb_tree.rb_node;
287 while (rbp) {
288 struct xfs_extent_busy *busyp =
289 rb_entry(rbp, struct xfs_extent_busy, rb_node);
290 xfs_agblock_t bbno = busyp->bno;
291 xfs_agblock_t bend = bbno + busyp->length;
292
293 if (fbno + flen <= bbno) {
294 rbp = rbp->rb_left;
295 continue;
296 } else if (fbno >= bend) {
297 rbp = rbp->rb_right;
298 continue;
299 }
300
301 if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
302 userdata))
303 goto restart;
304 }
305 spin_unlock(&pag->pagb_lock);
306}
307
308
309
310
311
312
313
314
315
316
317
318
319
/*
 * If the extent [*bno, *bno + *len) overlaps any busy extents in the AG,
 * shrink it to the largest contiguous sub-range that is not busy (possibly
 * to zero length, via the fail path).  The per-AG busy generation is
 * returned in *busy_gen so the caller can later detect whether the busy
 * state changed.
 *
 * Returns true (and updates *bno, *len and *busy_gen) if the extent was
 * trimmed, false if it was left untouched.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	/* stop descending once the candidate is already below minlen */
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			/* busy extent entirely to the right of the range */
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			/* busy extent entirely to the left of the range */
			rbp = rbp->rb_right;
			continue;
		}

		if (bbno <= fbno) {
			/*
			 * The busy extent starts at or before the range.
			 * If it also covers the end of the range
			 * (fend <= bend), no part of the range is usable.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Only the front of the range is busy: advance the
			 * start of the range to the end of the busy extent.
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/*
			 * Only the tail of the range is busy (bbno > fbno
			 * here): pull the end of the range back to the start
			 * of the busy extent.
			 */
			fend = bbno;
		} else {
			/*
			 * The busy extent lies strictly inside the range,
			 * leaving free space on both sides.  Pick a side by
			 * the allocation request's size requirements.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fulfills maxlen; keep it */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate is comfortably large */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate meets minlen at least */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:
	/* only report a trim when the range actually changed */
	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					  fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as failure indication: the caller is
	 * expected to check the result against its minlen requirement.
	 */
	flen = 0;
	goto out;
}
495
496STATIC void
497xfs_extent_busy_clear_one(
498 struct xfs_mount *mp,
499 struct xfs_perag *pag,
500 struct xfs_extent_busy *busyp)
501{
502 if (busyp->length) {
503 trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
504 busyp->length);
505 rb_erase(&busyp->rb_node, &pag->pagb_tree);
506 }
507
508 list_del_init(&busyp->list);
509 kmem_free(busyp);
510}
511
/*
 * Drop pag->pagb_lock and the perag reference.  When @wakeup is set, the
 * busy generation count is bumped and waiters on pagb_wait are woken while
 * the lock is still held.
 */
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		/* gen bump and wakeup happen under pagb_lock */
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
526
527
528
529
530
531
532void
533xfs_extent_busy_clear(
534 struct xfs_mount *mp,
535 struct list_head *list,
536 bool do_discard)
537{
538 struct xfs_extent_busy *busyp, *n;
539 struct xfs_perag *pag = NULL;
540 xfs_agnumber_t agno = NULLAGNUMBER;
541 bool wakeup = false;
542
543 list_for_each_entry_safe(busyp, n, list, list) {
544 if (busyp->agno != agno) {
545 if (pag)
546 xfs_extent_busy_put_pag(pag, wakeup);
547 agno = busyp->agno;
548 pag = xfs_perag_get(mp, agno);
549 spin_lock(&pag->pagb_lock);
550 wakeup = false;
551 }
552
553 if (do_discard && busyp->length &&
554 !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
555 busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
556 } else {
557 xfs_extent_busy_clear_one(mp, pag, busyp);
558 wakeup = true;
559 }
560 }
561
562 if (pag)
563 xfs_extent_busy_put_pag(pag, wakeup);
564}
565
566
567
568
569void
570xfs_extent_busy_flush(
571 struct xfs_mount *mp,
572 struct xfs_perag *pag,
573 unsigned busy_gen)
574{
575 DEFINE_WAIT (wait);
576 int error;
577
578 error = xfs_log_force(mp, XFS_LOG_SYNC);
579 if (error)
580 return;
581
582 do {
583 prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
584 if (busy_gen != READ_ONCE(pag->pagb_gen))
585 break;
586 schedule();
587 } while (1);
588
589 finish_wait(&pag->pagb_wait, &wait);
590}
591
592void
593xfs_extent_busy_wait_all(
594 struct xfs_mount *mp)
595{
596 struct xfs_perag *pag;
597 DEFINE_WAIT (wait);
598 xfs_agnumber_t agno;
599
600 for_each_perag(mp, agno, pag) {
601 do {
602 prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
603 if (RB_EMPTY_ROOT(&pag->pagb_tree))
604 break;
605 schedule();
606 } while (1);
607 finish_wait(&pag->pagb_wait, &wait);
608 }
609}
610
611
612
613
614int
615xfs_extent_busy_ag_cmp(
616 void *priv,
617 const struct list_head *l1,
618 const struct list_head *l2)
619{
620 struct xfs_extent_busy *b1 =
621 container_of(l1, struct xfs_extent_busy, list);
622 struct xfs_extent_busy *b2 =
623 container_of(l2, struct xfs_extent_busy, list);
624 s32 diff;
625
626 diff = b1->agno - b2->agno;
627 if (!diff)
628 diff = b1->bno - b2->bno;
629 return diff;
630}
631