/*
 * AFS volume location (vlocation) record management: lookup, caching,
 * background refresh and reaping of volume location database entries.
 */
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include "internal.h"
18
/* seconds an unreferenced record lingers in the graveyard before reaping */
static unsigned afs_vlocation_timeout = 10;
/* seconds between background refreshes of a record */
static unsigned afs_vlocation_update_timeout = 10 * 60;

static void afs_vlocation_reaper(struct work_struct *);
static void afs_vlocation_updater(struct work_struct *);

/* records awaiting background refresh, kept sorted by update_at */
static LIST_HEAD(afs_vlocation_updates);
/* unreferenced records awaiting reaping, in order of time_of_death */
static LIST_HEAD(afs_vlocation_graveyard);
static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
static struct workqueue_struct *afs_vlocation_update_worker;
32
33
34
35
36
/*
 * Ask the cell's VL servers for the location record matching the volume
 * name in vl->vldb.name.
 *
 * Starts at the cell's current server index and rotates to the next server
 * on transient network errors; gives up immediately on OOM, no network, a
 * definitive "no such volume" (-ENOMEDIUM) or a key problem.  Unknown
 * errors are mapped to -EIO and also cause rotation.
 *
 * Returns 0 and fills in *vldb on success, or a negative errno.  Takes the
 * cell's vl_sem to serialise access to the server rotation index.
 */
static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
					   struct key *key,
					   struct afs_cache_vlocation *vldb)
{
	struct afs_cell *cell = vl->cell;
	struct in_addr addr;
	int count, ret;

	_enter("%s,%s", cell->name, vl->vldb.name);

	down_write(&vl->cell->vl_sem);
	ret = -ENOMEDIUM;
	for (count = cell->vl_naddrs; count > 0; count--) {
		addr = cell->vl_addrs[cell->vl_curr_svix];

		_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);

		/* attempt to access the VL server */
		ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
					       false);
		switch (ret) {
		case 0:
			goto out;
		case -ENOMEM:
		case -ENONET:
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			/* no memory / no net is fatal; other network errors
			 * just move us on to the next server */
			if (ret == -ENOMEM || ret == -ENONET)
				goto out;
			goto rotate;
		case -ENOMEDIUM:
		case -EKEYREJECTED:
		case -EKEYEXPIRED:
			/* definitive answer - no point asking other servers */
			goto out;
		default:
			ret = -EIO;
			goto rotate;
		}

		/* rotate the server records upon lookup failure */
	rotate:
		cell->vl_curr_svix++;
		cell->vl_curr_svix %= cell->vl_naddrs;
	}

out:
	up_write(&vl->cell->vl_sem);
	_leave(" = %d", ret);
	return ret;
}
88
89
90
91
92
/*
 * Ask the cell's VL servers for the location record with the given volume
 * ID and type.
 *
 * Rotates through the cell's server list on transient network errors, gives
 * up immediately on OOM / no network, and retries the same server up to
 * three times on -EBUSY (sleeping one tick from the second attempt) before
 * rotating.  Each -ENOMEDIUM reply bumps upd_rej_cnt; if the scan ends in
 * failure with any rejections recorded, the record is flagged invalid and
 * -ENOMEDIUM returned.
 *
 * Returns 0 and fills in *vldb on success, or a negative errno.  Takes the
 * cell's vl_sem to serialise access to the server rotation index.
 */
static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
					 struct key *key,
					 afs_volid_t volid,
					 afs_voltype_t voltype,
					 struct afs_cache_vlocation *vldb)
{
	struct afs_cell *cell = vl->cell;
	struct in_addr addr;
	int count, ret;

	_enter("%s,%x,%d,", cell->name, volid, voltype);

	down_write(&vl->cell->vl_sem);
	ret = -ENOMEDIUM;
	for (count = cell->vl_naddrs; count > 0; count--) {
		addr = cell->vl_addrs[cell->vl_curr_svix];

		_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);

		/* attempt to access the VL server */
		ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
					     false);
		switch (ret) {
		case 0:
			goto out;
		case -ENOMEM:
		case -ENONET:
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			/* no memory / no net is fatal; other network errors
			 * just move us on to the next server */
			if (ret == -ENOMEM || ret == -ENONET)
				goto out;
			goto rotate;
		case -EBUSY:
			/* retry this server up to three times; after that the
			 * break drops through to the rotation code below */
			vl->upd_busy_cnt++;
			if (vl->upd_busy_cnt <= 3) {
				if (vl->upd_busy_cnt > 1) {
					/* second+ BUSY - sleep a little bit */
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(1);
				}
				continue;
			}
			break;
		case -ENOMEDIUM:
			/* this server doesn't know the volume - note the
			 * rejection and try the next one */
			vl->upd_rej_cnt++;
			goto rotate;
		default:
			ret = -EIO;
			goto rotate;
		}

		/* rotate the server records upon lookup failure */
	rotate:
		cell->vl_curr_svix++;
		cell->vl_curr_svix %= cell->vl_naddrs;
		vl->upd_busy_cnt = 0;
	}

out:
	/* if the scan failed and at least one server positively rejected the
	 * volume, declare the record no longer valid */
	if (ret < 0 && vl->upd_rej_cnt > 0) {
		printk(KERN_NOTICE "kAFS:"
		       " Active volume no longer valid '%s'\n",
		       vl->vldb.name);
		vl->valid = 0;
		ret = -ENOMEDIUM;
	}

	up_write(&vl->cell->vl_sem);
	_leave(" = %d", ret);
	return ret;
}
165
166
167
168
169static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
170 const char *name,
171 size_t namesz)
172{
173 struct afs_vlocation *vl;
174
175 vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
176 if (vl) {
177 vl->cell = cell;
178 vl->state = AFS_VL_NEW;
179 atomic_set(&vl->usage, 1);
180 INIT_LIST_HEAD(&vl->link);
181 INIT_LIST_HEAD(&vl->grave);
182 INIT_LIST_HEAD(&vl->update);
183 init_waitqueue_head(&vl->waitq);
184 spin_lock_init(&vl->lock);
185 memcpy(vl->vldb.name, name, namesz);
186 }
187
188 _leave(" = %p", vl);
189 return vl;
190}
191
192
193
194
195static int afs_vlocation_update_record(struct afs_vlocation *vl,
196 struct key *key,
197 struct afs_cache_vlocation *vldb)
198{
199 afs_voltype_t voltype;
200 afs_volid_t vid;
201 int ret;
202
203
204 _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
205 vl->vldb.name,
206 vl->vldb.vidmask,
207 ntohl(vl->vldb.servers[0].s_addr),
208 vl->vldb.srvtmask[0],
209 ntohl(vl->vldb.servers[1].s_addr),
210 vl->vldb.srvtmask[1],
211 ntohl(vl->vldb.servers[2].s_addr),
212 vl->vldb.srvtmask[2]);
213
214 _debug("Vids: %08x %08x %08x",
215 vl->vldb.vid[0],
216 vl->vldb.vid[1],
217 vl->vldb.vid[2]);
218
219 if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
220 vid = vl->vldb.vid[0];
221 voltype = AFSVL_RWVOL;
222 } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
223 vid = vl->vldb.vid[1];
224 voltype = AFSVL_ROVOL;
225 } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
226 vid = vl->vldb.vid[2];
227 voltype = AFSVL_BACKVOL;
228 } else {
229 BUG();
230 vid = 0;
231 voltype = 0;
232 }
233
234
235
236
237 ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
238 switch (ret) {
239
240 default:
241 printk(KERN_WARNING "kAFS:"
242 " failed to update volume '%s' (%x) up in '%s': %d\n",
243 vl->vldb.name, vid, vl->cell->name, ret);
244 _leave(" = %d", ret);
245 return ret;
246
247
248 case 0:
249 _leave(" = 0");
250 return 0;
251
252
253 case -ENOMEDIUM:
254 printk(KERN_ERR "kAFS:"
255 " volume '%s' (%x) does not exist '%s'\n",
256 vl->vldb.name, vid, vl->cell->name);
257
258
259 _leave(" = %d", ret);
260 return ret;
261 }
262}
263
264
265
266
/*
 * Apply the result of a VL server lookup to a volume location record,
 * overwriting the cached copy wholesale and updating the fscache cookie.
 *
 * A server-side rename is logged (the old in-memory name is still replaced
 * by the new one via the structure copy).
 */
static void afs_vlocation_apply_update(struct afs_vlocation *vl,
				       struct afs_cache_vlocation *vldb)
{
	_debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
	       vldb->name, vldb->vidmask,
	       ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
	       ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
	       ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);

	_debug("Vids: %08x %08x %08x",
	       vldb->vid[0], vldb->vid[1], vldb->vid[2]);

	if (strcmp(vldb->name, vl->vldb.name) != 0)
		printk(KERN_NOTICE "kAFS:"
		       " name of volume '%s' changed to '%s' on server\n",
		       vl->vldb.name, vldb->name);

	vl->vldb = *vldb;

#ifdef CONFIG_AFS_FSCACHE
	fscache_update_cookie(vl->cache);
#endif
}
290
291
292
293
294
295static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
296 struct key *key)
297{
298 struct afs_cache_vlocation vldb;
299 int ret;
300
301 _enter("");
302
303 ASSERTCMP(vl->valid, ==, 0);
304
305 memset(&vldb, 0, sizeof(vldb));
306
307
308#ifdef CONFIG_AFS_FSCACHE
309 vl->cache = fscache_acquire_cookie(vl->cell->cache,
310 &afs_vlocation_cache_index_def, vl,
311 true);
312#endif
313
314 if (vl->valid) {
315
316
317 _debug("found in cache");
318 ret = afs_vlocation_update_record(vl, key, &vldb);
319 } else {
320
321
322 ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
323 if (ret < 0) {
324 printk("kAFS: failed to locate '%s' in cell '%s'\n",
325 vl->vldb.name, vl->cell->name);
326 return ret;
327 }
328 }
329
330 afs_vlocation_apply_update(vl, &vldb);
331 _leave(" = 0");
332 return 0;
333}
334
335
336
337
/*
 * Queue a volume location record for a background refresh one update
 * interval from now.
 *
 * The afs_vlocation_updates list is kept sorted by update_at: if the new
 * deadline would not sort strictly after the current tail's, it is nudged
 * to one second past it.  The delayed updater work only needs (re)arming
 * when the list was previously empty - otherwise it is already pending for
 * an earlier entry.
 */
static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
{
	struct afs_vlocation *xvl;

	/* wait at least 10 minutes before updating... */
	vl->update_at = ktime_get_real_seconds() +
			afs_vlocation_update_timeout;

	spin_lock(&afs_vlocation_updates_lock);

	if (!list_empty(&afs_vlocation_updates)) {
		/* ... but wait at least 1 second more than the newest record
		 * already queued so that we don't spam the VL server suddenly
		 * with lots of requests
		 */
		xvl = list_entry(afs_vlocation_updates.prev,
				 struct afs_vlocation, update);
		if (vl->update_at <= xvl->update_at)
			vl->update_at = xvl->update_at + 1;
	} else {
		queue_delayed_work(afs_vlocation_update_worker,
				   &afs_vlocation_update,
				   afs_vlocation_update_timeout * HZ);
	}

	list_add_tail(&vl->update, &afs_vlocation_updates);
	spin_unlock(&afs_vlocation_updates_lock);
}
366
367
368
369
370
371
372
373
/*
 * Look up the volume location record for a named volume in the given cell,
 * creating it if it doesn't yet exist.
 *
 * An existing record is reused: its refcount is raised, it is rescued from
 * the graveyard if queued there, and the caller then waits interruptibly
 * for it to reach the VALID state - taking over the server fill-in itself
 * if the record is NEW or NO_VOLUME.  Otherwise a new record is allocated,
 * hooked onto the cell, filled in from the VL servers and scheduled for
 * periodic updates.
 *
 * Returns the record with a reference held, or ERR_PTR(-ENAMETOOLONG),
 * ERR_PTR(-ENOMEM), or an ERR_PTR-wrapped fill-in/wait error.
 */
struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
					   struct key *key,
					   const char *name,
					   size_t namesz)
{
	struct afs_vlocation *vl;
	int ret;

	_enter("{%s},{%x},%*.*s,%zu",
	       cell->name, key_serial(key),
	       (int) namesz, (int) namesz, name, namesz);

	/* the name must fit in vldb.name with room for a NUL */
	if (namesz >= sizeof(vl->vldb.name)) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* see if we already have an in-memory record for this volume */
	down_write(&cell->vl_sem);
	spin_lock(&cell->vl_lock);
	list_for_each_entry(vl, &cell->vl_list, link) {
		if (vl->vldb.name[namesz] != '\0')
			continue;
		if (memcmp(vl->vldb.name, name, namesz) == 0)
			goto found_in_memory;
	}
	spin_unlock(&cell->vl_lock);

	/* not in memory - create a new record */
	vl = afs_vlocation_alloc(cell, name, namesz);
	if (!vl) {
		up_write(&cell->vl_sem);
		return ERR_PTR(-ENOMEM);
	}

	afs_get_cell(cell);

	list_add_tail(&vl->link, &cell->vl_list);
	vl->state = AFS_VL_CREATING;
	up_write(&cell->vl_sem);

fill_in_record:
	ret = afs_vlocation_fill_in_record(vl, key);
	if (ret < 0)
		goto error_abandon;
	spin_lock(&vl->lock);
	vl->state = AFS_VL_VALID;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* update volume entry in local cache */
#ifdef CONFIG_AFS_FSCACHE
	fscache_update_cookie(vl->cache);
#endif

	/* schedule for regular background refresh */
	afs_vlocation_queue_for_updates(vl);
	goto success;

found_in_memory:
	/* grab a reference and pull the record back out of the graveyard if
	 * it was queued for reaping */
	_debug("found in memory");
	atomic_inc(&vl->usage);
	spin_unlock(&cell->vl_lock);
	if (!list_empty(&vl->grave)) {
		spin_lock(&afs_vlocation_graveyard_lock);
		list_del_init(&vl->grave);
		spin_unlock(&afs_vlocation_graveyard_lock);
	}
	up_write(&cell->vl_sem);

	/* wait for the record to become usable, restarting the fill-in
	 * ourselves if nobody else is currently doing it */
	spin_lock(&vl->lock);
	while (vl->state != AFS_VL_VALID) {
		afs_vlocation_state_t state = vl->state;

		_debug("invalid [state %d]", state);

		if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
			vl->state = AFS_VL_CREATING;
			spin_unlock(&vl->lock);
			goto fill_in_record;
		}

		/* must now wait for creation/update by someone else to
		 * finish; the wait can be interrupted by a signal */
		_debug("wait");

		spin_unlock(&vl->lock);
		ret = wait_event_interruptible(vl->waitq,
					       vl->state == AFS_VL_NEW ||
					       vl->state == AFS_VL_VALID ||
					       vl->state == AFS_VL_NO_VOLUME);
		if (ret < 0)
			goto error;
		spin_lock(&vl->lock);
	}
	spin_unlock(&vl->lock);

success:
	_leave(" = %p", vl);
	return vl;

error_abandon:
	/* fill-in failed - mark the record NEW again so another caller can
	 * retry, then drop our reference */
	spin_lock(&vl->lock);
	vl->state = AFS_VL_NEW;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);
error:
	ASSERT(vl != NULL);
	afs_put_vlocation(vl);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
488
489
490
491
/*
 * Drop a reference on a volume location record.
 *
 * When the last reference goes, the record is moved to the graveyard with a
 * timestamp (so a quick re-lookup can resurrect it before the reaper runs)
 * and removed from the pending-updates list.  The usage count is re-checked
 * under the graveyard lock to guard against a concurrent resurrection by
 * afs_vlocation_lookup().
 *
 * NULL is tolerated as a no-op.
 */
void afs_put_vlocation(struct afs_vlocation *vl)
{
	if (!vl)
		return;

	_enter("%s", vl->vldb.name);

	ASSERTCMP(atomic_read(&vl->usage), >, 0);

	if (likely(!atomic_dec_and_test(&vl->usage))) {
		_leave("");
		return;
	}

	spin_lock(&afs_vlocation_graveyard_lock);
	if (atomic_read(&vl->usage) == 0) {
		_debug("buried");
		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
		vl->time_of_death = ktime_get_real_seconds();
		queue_delayed_work(afs_wq, &afs_vlocation_reap,
				   afs_vlocation_timeout * HZ);

		/* suspend updates on this record */
		if (!list_empty(&vl->update)) {
			spin_lock(&afs_vlocation_updates_lock);
			list_del_init(&vl->update);
			spin_unlock(&afs_vlocation_updates_lock);
		}
	}
	spin_unlock(&afs_vlocation_graveyard_lock);
	_leave(" [killed?]");
}
524
525
526
527
/*
 * Finally destroy a volume location record: relinquish its cache cookie,
 * drop the reference it held on its cell and free the memory.  Called from
 * the reaper once the record has been unhooked from all lists.
 */
static void afs_vlocation_destroy(struct afs_vlocation *vl)
{
	_enter("%p", vl);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(vl->cache, 0);
#endif
	afs_put_cell(vl->cell);
	kfree(vl);
}
538
539
540
541
/*
 * Delayed-work reaper: destroy volume location records whose grave time has
 * expired.
 *
 * The graveyard is walked in burial order; the first record still inside
 * its timeout re-arms the delayed work for the remaining interval and stops
 * the scan.  Records that regained a reference since burial are merely
 * unlinked from the graveyard.  Condemned records are unhooked from their
 * cell under the cell's vl_lock, collected on a local list, and destroyed
 * after the graveyard lock is dropped.
 */
static void afs_vlocation_reaper(struct work_struct *work)
{
	LIST_HEAD(corpses);
	struct afs_vlocation *vl;
	unsigned long delay, expiry;
	time64_t now;

	_enter("");

	now = ktime_get_real_seconds();
	spin_lock(&afs_vlocation_graveyard_lock);

	while (!list_empty(&afs_vlocation_graveyard)) {
		vl = list_entry(afs_vlocation_graveyard.next,
				struct afs_vlocation, grave);

		_debug("check %p", vl);

		/* the queue is ordered by death date: the first record not
		 * yet expired bounds this pass */
		expiry = vl->time_of_death + afs_vlocation_timeout;
		if (expiry > now) {
			delay = (expiry - now) * HZ;
			_debug("delay %lu", delay);
			mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
			break;
		}

		spin_lock(&vl->cell->vl_lock);
		if (atomic_read(&vl->usage) > 0) {
			/* resurrected since burial - leave it be */
			_debug("no reap");
			list_del_init(&vl->grave);
		} else {
			_debug("reap");
			list_move_tail(&vl->grave, &corpses);
			list_del_init(&vl->link);
		}
		spin_unlock(&vl->cell->vl_lock);
	}

	spin_unlock(&afs_vlocation_graveyard_lock);

	/* now reap the corpses we've extracted, outside of any lock */
	while (!list_empty(&corpses)) {
		vl = list_entry(corpses.next, struct afs_vlocation, grave);
		list_del(&vl->grave);
		afs_vlocation_destroy(vl);
	}

	_leave("");
}
592
593
594
595
596int __init afs_vlocation_update_init(void)
597{
598 afs_vlocation_update_worker = alloc_workqueue("kafs_vlupdated",
599 WQ_MEM_RECLAIM, 0);
600 return afs_vlocation_update_worker ? 0 : -ENOMEM;
601}
602
603
604
605
/*
 * Discard all volume location records at shutdown.
 *
 * Zeroing afs_vlocation_timeout makes every buried record immediately
 * reapable.  The pending-updates list head is detached (abandoning any
 * queued entries), the updater is kicked one last time and its workqueue
 * drained and destroyed, then the reaper is kicked to run at once.
 */
void afs_vlocation_purge(void)
{
	afs_vlocation_timeout = 0;

	spin_lock(&afs_vlocation_updates_lock);
	list_del_init(&afs_vlocation_updates);
	spin_unlock(&afs_vlocation_updates_lock);
	mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
	destroy_workqueue(afs_vlocation_update_worker);

	mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
}
618
619
620
621
/*
 * Delayed-work updater: refresh the volume location record at the head of
 * the updates list.
 *
 * Dead entries (usage 0) at the head are discarded first.  If the live
 * head's deadline hasn't arrived, the work is simply re-queued for the
 * remainder and nothing else happens.  Otherwise the record is removed from
 * the list with an extra reference, refreshed from the VL servers (no key),
 * its state set accordingly (VALID / VOLUME_DELETED / UNCERTAIN) and any
 * waiters woken.  It is then re-queued at the tail with a new deadline that
 * preserves the list's update_at ordering, and the work is re-armed for
 * whatever is now at the head.
 */
static void afs_vlocation_updater(struct work_struct *work)
{
	struct afs_cache_vlocation vldb;
	struct afs_vlocation *vl, *xvl;
	time64_t now;
	long timeout;
	int ret;

	_enter("");

	now = ktime_get_real_seconds();

	/* find a record to update, discarding dead entries at the head */
	spin_lock(&afs_vlocation_updates_lock);
	for (;;) {
		if (list_empty(&afs_vlocation_updates)) {
			spin_unlock(&afs_vlocation_updates_lock);
			_leave(" [nothing]");
			return;
		}

		vl = list_entry(afs_vlocation_updates.next,
				struct afs_vlocation, update);
		if (atomic_read(&vl->usage) > 0)
			break;
		list_del_init(&vl->update);
	}

	/* not time yet? just re-arm for the remainder */
	timeout = vl->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vlocation_update_worker,
				   &afs_vlocation_update, timeout * HZ);
		spin_unlock(&afs_vlocation_updates_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vl->update);
	atomic_inc(&vl->usage);
	spin_unlock(&afs_vlocation_updates_lock);

	/* we can now perform the update */
	_debug("update %s", vl->vldb.name);
	vl->state = AFS_VL_UPDATING;
	vl->upd_rej_cnt = 0;
	vl->upd_busy_cnt = 0;

	ret = afs_vlocation_update_record(vl, NULL, &vldb);
	spin_lock(&vl->lock);
	switch (ret) {
	case 0:
		afs_vlocation_apply_update(vl, &vldb);
		vl->state = AFS_VL_VALID;
		break;
	case -ENOMEDIUM:
		vl->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vl->state = AFS_VL_UNCERTAIN;
		break;
	}
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* and then reschedule */
	_debug("reschedule");
	vl->update_at = ktime_get_real_seconds() +
			afs_vlocation_update_timeout;

	spin_lock(&afs_vlocation_updates_lock);

	if (!list_empty(&afs_vlocation_updates)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvl = list_entry(afs_vlocation_updates.prev,
				 struct afs_vlocation, update);
		if (vl->update_at <= xvl->update_at)
			vl->update_at = xvl->update_at + 1;
		xvl = list_entry(afs_vlocation_updates.next,
				 struct afs_vlocation, update);
		timeout = xvl->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vlocation_update_timeout;
	}

	ASSERT(list_empty(&vl->update));

	list_add_tail(&vl->update, &afs_vlocation_updates);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vlocation_update_worker,
			   &afs_vlocation_update, timeout * HZ);
	spin_unlock(&afs_vlocation_updates_lock);
	afs_put_vlocation(vl);
}
721