/*
 * This file implements UBIFS garbage collection (GC), which is performed
 * out-of-place. The procedure differs depending on whether a LEB is an
 * index LEB (contains only index nodes) or a data LEB. For data LEBs,
 * garbage is reclaimed by moving the still-valid nodes to the write-buffer
 * of the GC journal head, after which the LEB may be unmapped and re-used.
 * For index LEBs, garbage is reclaimed by marking the index nodes dirty;
 * the following commit writes them elsewhere, and only after the commit
 * may such a LEB be unmapped (see 'ubifs_gc_start_commit()' and
 * 'ubifs_gc_end_commit()').
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list_sort.h>
#include "ubifs.h"
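
/*
 * GC moves nodes using the dedicated GC journal head (GCHD): the functions
 * below write moved nodes into its write-buffer, and @c->gc_lnum holds the
 * LEB reserved to become the next GC head once the current one fills up.
 */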

/*
 * GC may need to move more than one LEB to make progress. The constants
 * below define soft and hard limits on the number of LEBs the garbage
 * collector may move in one invocation of 'ubifs_garbage_collect()'.
 */
#define SOFT_LEBS_LIMIT 4
#define HARD_LEBS_LIMIT 32

/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the LEB reserved in @c->gc_lnum.
 * Returns %0 in case of success, %-EAGAIN if commit is required, and other
 * negative error codes in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(c, gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * @c->gc_lnum.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
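
/*
 * Both comparators below begin with 'cond_resched()' and an @a == @b check:
 * sorting a large LEB's worth of nodes may take a while, and the comparator
 * is the only code which runs on every comparison, so it is a convenient
 * rescheduling point.
 */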

/**
 * data_nodes_cmp - compare 2 data nodes.
 * @priv: UBIFS file-system description object
 * @a: first data node
 * @b: second data node
 *
 * This function compares data nodes @a and @b. Returns %1 if @a has greater
 * inode or block number, and %-1 otherwise.
 */
static int data_nodes_cmp(void *priv, const struct list_head *a,
			  const struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY);
	ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY);
	ubifs_assert(c, sa->type == UBIFS_DATA_NODE);
	ubifs_assert(c, sb->type == UBIFS_DATA_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		unsigned int blka = key_block(c, &sa->key);
		unsigned int blkb = key_block(c, &sb->key);

		if (blka <= blkb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

/**
 * nondata_nodes_cmp - compare 2 non-data nodes.
 * @priv: UBIFS file-system description object
 * @a: first node
 * @b: second node
 *
 * This function compares nodes @a and @b. It makes sure that inode nodes go
 * first, sorted by length in descending order. Directory entry nodes go after
 * inode nodes and are sorted in ascending hash value order.
 */
static int nondata_nodes_cmp(void *priv, const struct list_head *a,
			     const struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY &&
		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
	ubifs_assert(c, sa->type != UBIFS_DATA_NODE &&
		     sb->type != UBIFS_DATA_NODE);

	/* Inodes go before directory entries */
	if (sa->type == UBIFS_INO_NODE) {
		if (sb->type == UBIFS_INO_NODE)
			return sb->len - sa->len;
		return -1;
	}
	if (sb->type == UBIFS_INO_NODE)
		return 1;

	ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
	ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
	ubifs_assert(c, sa->type == UBIFS_DENT_NODE ||
		     sa->type == UBIFS_XENT_NODE);
	ubifs_assert(c, sb->type == UBIFS_DENT_NODE ||
		     sb->type == UBIFS_XENT_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		uint32_t hasha = key_hash(c, &sa->key);
		uint32_t hashb = key_hash(c, &sb->key);

		if (hasha <= hashb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes nodes to sort and contains the result on exit
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the list of nodes to garbage collect. First of all, it
 * kills obsolete nodes and separates data and non-data nodes into the
 * @sleb->nodes and @nondata lists correspondingly.
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read: data nodes with lower inode numbers go before data nodes with
 * higher inode numbers, and data nodes with lower block numbers go before
 * data nodes with higher block numbers.
 *
 * Non-data nodes are sorted as follows.
 *   o First go inode nodes - they are sorted in descending length order.
 *   o Then go directory entry nodes - they are sorted in hash order, which
 *     should supposedly optimize 'readdir()'. Direntry nodes with lower
 *     parent inode numbers go before direntry nodes with higher parent inode
 *     numbers, and direntry nodes with lower name hash values go before
 *     direntry nodes with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(c, snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE ||
			     snod->type == UBIFS_AUTH_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Truncation or authentication node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}

/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	list_del(&snod->list);
	kfree(snod);
	return err;
}
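
/*
 * Note the ordering in 'move_node()': the node is first written out through
 * the GC head write-buffer and only then re-pointed in the TNC. Until the
 * write-buffer is synchronized, the TNC may thus refer to data which still
 * sits in the buffer, which is why 'ubifs_garbage_collect_leb()' synchronizes
 * the write-buffer before unmapping the GC'ed LEB.
 */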

/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from the data LEB described by @sleb to the
 * GC journal head. It returns zero in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of other failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail, moved = 0;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used -
					ubifs_auth_node_sz(c);
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
						 snod->node, snod->len);
			if (err)
				goto out;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
			moved = 1;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used -
					ubifs_auth_node_sz(c);
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
						 snod->node, snod->len);
			if (err)
				goto out;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
			moved = 1;
		}

		if (ubifs_authenticated(c) && moved) {
			struct ubifs_auth_node *auth;

			auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS);
			if (!auth) {
				err = -ENOMEM;
				goto out;
			}

			err = ubifs_prepare_auth_node(c, auth,
						c->jheads[GCHD].log_hash);
			if (err) {
				kfree(auth);
				goto out;
			}

			err = ubifs_wbuf_write_nolock(wbuf, auth,
						      ubifs_auth_node_sz(c));
			if (err) {
				kfree(auth);
				goto out;
			}

			ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c));
			/* The node contents were consumed by the write-buffer */
			kfree(auth);
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in the LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
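
/*
 * Note that the 'out:' path above splices the non-data nodes back onto
 * @sleb->nodes, so that the caller can free all remaining scan nodes with a
 * single 'ubifs_scan_destroy()' call.
 */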

/**
 * gc_sync_wbufs - sync write-buffers for GC'ed LEB.
 * @c: UBIFS file-system description object
 *
 * A write-buffer of some journal head may contain new data which obsoletes
 * nodes sitting in the LEB being GC'ed. Such write-buffers have to reach the
 * flash before that LEB is unmapped, because otherwise an unclean reboot
 * could erase the only remaining copy. This function synchronizes the
 * write-buffers of all journal heads except the GC head. Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int gc_sync_wbufs(struct ubifs_info *c)
{
	int err, i;

	for (i = 0; i < c->jhead_cnt; i++) {
		if (i == GCHD)
			continue;
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			return err;
	}
	return 0;
}
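
/*
 * The GC head itself is skipped above: 'ubifs_wbuf_sync()' takes the
 * write-buffer's io_mutex, which is already held on the GC head by the GC
 * code, so the GC head write-buffer is synchronized separately with the
 * '_nolock' variant.
 */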

/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the @LEB_FREED,
 * @LEB_RETAINED, etc positive codes in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of failures.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c, c->gc_lnum != lnum);
	ubifs_assert(c, wbuf->lnum != lnum);

	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}

	/*
	 * We scan the entire LEB even though we only really need to scan up
	 * to (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(c, !list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it was not unmapped, and we
		 * account it under "GC'ed index LEBs" (it will be unmapped in
		 * 'ubifs_gc_end_commit()').
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
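
/*
 * The @c->gced_lnum / @c->gc_seq pair updated above lets TNC lookups detect
 * that a node may have been moved under them: the GC'ed LEB number is
 * published first and the sequence number is incremented afterwards, with
 * write barriers in between, so a reader observing the new @c->gc_seq also
 * observes which LEB was GC'ed and can retry its lookup.
 */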

/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection. The return codes are:
 *   o positive LEB number if the LEB has been freed and may be used;
 *   o %-EAGAIN if the caller has to run commit;
 *   o %-ENOSPC if GC failed to make any progress;
 *   o other negative error codes in case of other errors.
 *
 * Garbage collector writes data to the journal when GC'ing data LEBs, and
 * just marks indexing nodes dirty when GC'ing indexing LEBs. Thus, at some
 * point commit may be required. But commit cannot be run from inside GC,
 * because the caller might be holding the commit lock, so %-EAGAIN is
 * returned instead; this error code means that the caller has to run commit,
 * and re-run GC if there is still no free space.
 *
 * There are many reasons why this function may return %-EAGAIN:
 * o the log is full and there is no space to write an LEB reference for
 *   @c->gc_lnum;
 * o the journal is too large and exceeds size limitations;
 * o GC moved indexing LEBs, but they can be used only after the commit;
 * o the shrinker fails to find clean znodes to free and requests the commit;
 * o etc.
 *
 * Note, if the file-system is close to full, this function may return
 * %-ENOSPC even if in theory it is still possible to make a little more free
 * space.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(c, !wbuf->used);

	for (i = 0; ; i++) {
		int space_before, space_after;

		cond_resched();

		/* Give the commit an opportunity to run early */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Empty and freeable LEBs can turn up while we waited for
		 * the wbuf lock, or while we have been running GC. In that
		 * case, we should just return one of those instead of
		 * continuing to GC dirty LEBs. Hence we request
		 * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
		       lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
		       min_space);

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not an error, so we have to return
				 * the LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect some
			 * more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(c, ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * GC moved an LEB but did not make progress. This means that
		 * the previous GC head LEB contained too little free space
		 * and the LEB which was GC'ed contained only large nodes
		 * which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in a hope it'll contain a small node
		 *    which will fit the space we have at the end of current GC
		 *    head LEB, but there is no guarantee, so we try this out
		 *    unless we have already been working for too long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    heap, instead of just picking the head of the heap.
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	ubifs_assert(c, ret < 0);
	ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}

/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and
 * make it free. Note, we cannot do this with indexing LEBs because dirty
 * space may correspond to index nodes which are still part of the index, in
 * which case we must wait until the commit is done before unmapping; such
 * LEBs are recorded on the @c->idx_gc list instead. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. Note that recovery requires that
	 * all wbufs are synced before this, which is done in 'do_commit()'.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (!lp)
			break;
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
		ubifs_assert(c, lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(c, lp->flags & LPROPS_TAKEN);
		ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}

/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
						  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
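
/*
 * Entries whose @unmap flag is still zero - index LEBs GC'ed after
 * 'ubifs_gc_start_commit()' already ran - remain on @c->idx_gc. They are
 * unmapped by a later commit or handed out by 'ubifs_get_idx_gc_leb()'.
 */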

/**
 * ubifs_destroy_idx_gc - destroy idx_gc list.
 * @c: UBIFS file-system description object
 *
 * This function destroys the @c->idx_gc list. It is called when unmounting,
 * so locks are not needed.
 */
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
	while (!list_empty(&c->idx_gc)) {
		struct ubifs_gced_idx_leb *idx_gc;

		idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
				    list);
		c->idx_gc_cnt -= 1;
		list_del(&idx_gc->list);
		kfree(idx_gc);
	}
}

/**
 * ubifs_get_idx_gc_leb - get a LEB from GC'd index LEB list.
 * @c: UBIFS file-system description object
 *
 * Called during start commit, so locks are not needed. Returns the LEB number
 * of the first entry on @c->idx_gc, or %-ENOSPC if the list is empty.
 */
int ubifs_get_idx_gc_leb(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	int lnum;

	if (list_empty(&c->idx_gc))
		return -ENOSPC;
	idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
	lnum = idx_gc->lnum;
	/* @c->idx_gc_cnt is updated by the caller when lprops are updated */
	list_del(&idx_gc->list);
	kfree(idx_gc);
	return lnum;
}