// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_main.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* detail level in /proc/drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);

/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);

/* device -> minor mapping, resource list, and the lock protecting it */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= drbd_submit_bio,
	.open		= drbd_open,
	.release	= drbd_release,
};

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch.
	 * This extra list walk restart is paranoia, to catch requests
	 * being barrier-acked "unexpectedly".  It usually should find
	 * the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND,
 * FAIL_FROZEN_DISK_IO, RESTART_FROZEN_DISK_IO.
 *
 * Caller must hold resource->req_lock.
 */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from
 * the receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

	allow_kernel_signal(DRBD_SIGKILL);
	allow_kernel_signal(SIGXCPU);
restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		fallthrough;
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			send_sig(DRBD_SIGKILL, thi->task, 1);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/*
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
				    GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/*
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/* With @data == NULL, only the header (plus any extra header payload
	 * already placed in the socket buffer) is sent; otherwise @size bytes
	 * at @data follow the header.  MSG_MORE hints to TCP that the payload
	 * is sent immediately afterwards. */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		tcp_sock_set_nodelay(sock->socket->sk);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

/* communicated if (agreed_features & DRBD_FF_WSAME) */
static void
assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
		    struct request_queue *q)
{
	if (q) {
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = blk_queue_discard(q);
		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
	} else {
		q = device->rq_queue;
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;
		p->qlim->write_same_capable = 0;
	}
}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		assign_p_sizes_qlim(device, p, q);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE;
		assign_p_sizes_qlim(device, p, NULL);
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	if (trigger_reply)
		p->c_size = 0;
	else
		p->c_size = cpu_to_be64(get_capacity(device->vdisk));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i);
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:		the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i);
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}

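/* For illustration, the layout of the p_compressed_bm "encoding" byte as
 * implied by the three helpers above (a summary derived from this code,
 * not an authoritative protocol spec):
 *
 *   bit 7      whether the first run length describes set bits (dcbp_set_start)
 *   bits 6..4  number of trailing pad bits in the code (dcbp_set_pad_bits)
 *   bits 3..0  bitmap encoding, e.g. RLE_VLI_Bits (dcbp_set_code)
 *
 * So an RLE+VLI stream whose first run counts set bits and that needs
 * 3 pad bits would carry encoding = 0x80 | (3 << 4) | RLE_VLI_Bits.
 */
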
static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}

/*
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA,
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:		packet command code
 * @peer_req:		peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->blksize = cpu_to_be32(peer_req->i.size);
	p->pad = 0;
	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}

int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer.  */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}

int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;

	drop_it =   connection->meta.socket == sock
		|| !connection->ack_receiver.task
		|| get_t_state(&connection->ack_receiver) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it;
}

static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || !sendpage_ok(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len -= sent;
		offset += sent;
	} while (len > 0);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{
	if (connection->agreed_pro_version >= 95)
		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
			(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
			  ((connection->agreed_features & DRBD_FF_WZEROES) ?
			   (DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
			   : DP_DISCARD)
			: 0);
	else
		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
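
/* A worked example of the mapping above (illustration only, derived from
 * this function): on a connection that agreed on DRBD_FF_WZEROES, a
 * REQ_OP_WRITE_ZEROES bio *without* REQ_NOUNMAP is sent as
 * DP_ZEROES | DP_DISCARD ("zero it, unmapping is acceptable"), while the
 * same bio with REQ_NOUNMAP set is sent as DP_ZEROES alone.  Without
 * DRBD_FF_WZEROES, both degrade to plain DP_DISCARD for older peers. */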

/* Used to send write or TRIM aka REQ_OP_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	struct p_wsame *wsame = NULL;
	void *digest_out;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		|| (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & (DP_DISCARD|DP_ZEROES)) {
		enum drbd_packet cmd = (dp_flags & DP_ZEROES) ? P_ZEROES : P_TRIM;
		struct p_trim *t = (struct p_trim*)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
		goto out;
	}
	if (dp_flags & DP_WSAME) {
		/* this will only work if DRBD_FF_WSAME is set AND the
		 * handshake agreed that all nodes and backend devices are
		 * WRITE_SAME capable and agree on logical_block_size */
		wsame = (struct p_wsame*)p;
		digest_out = wsame + 1;
		wsame->size = cpu_to_be32(req->i.size);
	} else
		digest_out = p + 1;

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
	if (wsame) {
		err =
		    __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
				   sizeof(*wsame) + digest_size, NULL,
				   bio_iovec(req->master_bio).bv_len);
	} else
		err =
		    __send_command(peer_device->connection, device->vnr, sock, P_DATA,
				   sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		}
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET        (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
	} while (sent < size);

	if (sock == connection->data.socket)
		clear_bit(NET_CONGESTED, &connection->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			drbd_err(connection, "%s_sendmsg returned %d\n",
				 sock == connection->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}

/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(connection, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}

static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_device *device = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&device->resource->req_lock, flags);
	/* to have a stable device->state.role
	 * and no race with updating open_cnt */

	if (device->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!drbd_allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		device->open_cnt++;
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static void drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_device *device = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	device->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
}

/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{
	if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
		D_ASSERT(device, device->state.role == R_PRIMARY);
		if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
			drbd_queue_work_if_unqueued(
				&first_peer_device(device)->connection->sender_work,
				&device->unplug_work);
		}
	}
}

static void drbd_set_defaults(struct drbd_device *device)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	device->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}

void drbd_init_set_defaults(struct drbd_device *device)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(device);

	atomic_set(&device->ap_bio_cnt, 0);
	atomic_set(&device->ap_actlog_cnt, 0);
	atomic_set(&device->ap_pending_cnt, 0);
	atomic_set(&device->rs_pending_cnt, 0);
	atomic_set(&device->unacked_cnt, 0);
	atomic_set(&device->local_cnt, 0);
	atomic_set(&device->pp_in_use_by_net, 0);
	atomic_set(&device->rs_sect_in, 0);
	atomic_set(&device->rs_sect_ev, 0);
	atomic_set(&device->ap_in_flight, 0);
	atomic_set(&device->md_io.in_use, 0);

	mutex_init(&device->own_state_mutex);
	device->state_mutex = &device->own_state_mutex;

	spin_lock_init(&device->al_lock);
	spin_lock_init(&device->peer_seq_lock);

	INIT_LIST_HEAD(&device->active_ee);
	INIT_LIST_HEAD(&device->sync_ee);
	INIT_LIST_HEAD(&device->done_ee);
	INIT_LIST_HEAD(&device->read_ee);
	INIT_LIST_HEAD(&device->net_ee);
	INIT_LIST_HEAD(&device->resync_reads);
	INIT_LIST_HEAD(&device->resync_work.list);
	INIT_LIST_HEAD(&device->unplug_work.list);
	INIT_LIST_HEAD(&device->bm_io_work.w.list);
	INIT_LIST_HEAD(&device->pending_master_completion[0]);
	INIT_LIST_HEAD(&device->pending_master_completion[1]);
	INIT_LIST_HEAD(&device->pending_completion[0]);
	INIT_LIST_HEAD(&device->pending_completion[1]);

	device->resync_work.cb  = w_resync_timer;
	device->unplug_work.cb  = w_send_write_hint;
	device->bm_io_work.w.cb = w_bitmap_io;

	timer_setup(&device->resync_timer, resync_timer_fn, 0);
	timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
	timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
	timer_setup(&device->request_timer, request_timer_fn, 0);

	init_waitqueue_head(&device->misc_wait);
	init_waitqueue_head(&device->state_wait);
	init_waitqueue_head(&device->ee_wait);
	init_waitqueue_head(&device->al_wait);
	init_waitqueue_head(&device->seq_wait);

	device->resync_wenr = LC_FREE;
	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}

void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
	char ppb[10];

	set_capacity_and_notify(device->vdisk, size);

	drbd_info(device, "size = %s (%llu KB)\n",
		ppsize(ppb, size>>1), (unsigned long long)size>>1);
}

void drbd_device_cleanup(struct drbd_device *device)
{
	int i;
	if (first_peer_device(device)->connection->receiver.t_state != NONE)
		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				first_peer_device(device)->connection->receiver.t_state);

	device->al_writ_cnt  =
	device->bm_writ_cnt  =
	device->read_cnt     =
	device->recv_cnt     =
	device->send_cnt     =
	device->writ_cnt     =
	device->p_size       =
	device->rs_start     =
	device->rs_total     =
	device->rs_failed    = 0;
	device->rs_last_events = 0;
	device->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		device->rs_mark_left[i] = 0;
		device->rs_mark_time[i] = 0;
	}
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	set_capacity_and_notify(device->vdisk, 0);
	if (device->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(device, 0, 1);
		drbd_bm_cleanup(device);
	}

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	clear_bit(AL_SUSPENDED, &device->flags);

	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));
	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->net_ee));
	D_ASSERT(device, list_empty(&device->resync_reads));
	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(device, list_empty(&device->resync_work.list));
	D_ASSERT(device, list_empty(&device->unplug_work.list));

	drbd_set_defaults(device);
}

static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	bioset_exit(&drbd_io_bio_set);
	bioset_exit(&drbd_md_io_bio_set);
	mempool_exit(&drbd_md_io_page_pool);
	mempool_exit(&drbd_ee_mempool);
	mempool_exit(&drbd_request_mempool);
	kmem_cache_destroy(drbd_ee_cache);
	kmem_cache_destroy(drbd_request_cache);
	kmem_cache_destroy(drbd_bm_ext_cache);
	kmem_cache_destroy(drbd_al_ext_cache);

	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;

	return;
}
2113
2114static int drbd_create_mempools(void)
2115{
2116 struct page *page;
2117 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
2118 int i, ret;
2119
2120
2121 drbd_request_cache = kmem_cache_create(
2122 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2123 if (drbd_request_cache == NULL)
2124 goto Enomem;
2125
2126 drbd_ee_cache = kmem_cache_create(
2127 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2128 if (drbd_ee_cache == NULL)
2129 goto Enomem;
2130
2131 drbd_bm_ext_cache = kmem_cache_create(
2132 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2133 if (drbd_bm_ext_cache == NULL)
2134 goto Enomem;
2135
2136 drbd_al_ext_cache = kmem_cache_create(
2137 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2138 if (drbd_al_ext_cache == NULL)
2139 goto Enomem;
2140
2141
2142 ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
2143 if (ret)
2144 goto Enomem;
2145
2146 ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
2147 BIOSET_NEED_BVECS);
2148 if (ret)
2149 goto Enomem;
2150
2151 ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
2152 if (ret)
2153 goto Enomem;
2154
2155 ret = mempool_init_slab_pool(&drbd_request_mempool, number,
2156 drbd_request_cache);
2157 if (ret)
2158 goto Enomem;
2159
2160 ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
2161 if (ret)
2162 goto Enomem;
2163
2164
2165 spin_lock_init(&drbd_pp_lock);
2166
2167 for (i = 0; i < number; i++) {
2168 page = alloc_page(GFP_HIGHUSER);
2169 if (!page)
2170 goto Enomem;
2171 set_page_private(page, (unsigned long)drbd_pp_pool);
2172 drbd_pp_pool = page;
2173 }
2174 drbd_pp_vacant = number;
2175
2176 return 0;
2177
2178Enomem:
2179 drbd_destroy_mempools();
2180 return -ENOMEM;
2181}
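
/*
 * Illustrative sketch (not upstream code): drbd_pp_pool is a simple LIFO
 * of pre-allocated pages, chained through each page's private field.
 * Popping one page under drbd_pp_lock would look roughly like:
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		set_page_private(page, 0);
 *		drbd_pp_vacant--;
 *	}
 *	spin_unlock(&drbd_pp_lock);
 *
 * The real pop/push helpers live in drbd_receiver.c.
 */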

static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device, *tmp_peer_device;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io.page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);

	/* not for_each_connection(connection, resource):
	 * those may have been cleaned up and disassociated already.
	 */
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
		kfree(peer_device);
	}
	memset(device, 0xfd, sizeof(*device));
	kfree(device);
	kref_put(&resource->kref, drbd_destroy_resource);
}

/* One global retry thread, if we need to push back some bio and have it
 * reissued "from the top". */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->device;
		struct bio *bio = req->master_bio;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
				(req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				req, atomic_read(&req->completion_ref),
				req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing submit_bio_noacct(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio);
	}
}

/* called via drbd_req_put_completion_ref(),
 * holds resource->req_lock */
void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->device);

	queue_work(retry.wq, &retry.worker);
}
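
/*
 * Illustrative sketch (not upstream code) of the retry round trip: a
 * postponed write is parked on retry.writes and later resubmitted from
 * process context, reusing the original master bio:
 *
 *	drbd_restart_request(req);	// park req, drop the ap_bio ref
 *	...				// retry.wq worker wakes up
 *	do_retry();			// re-takes the ap_bio ref and
 *					// calls __drbd_make_request()
 */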

void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	memset(resource, 0xf2, sizeof(*resource));
	kfree(resource);
}

void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		drbd_debugfs_connection_cleanup(connection);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	drbd_debugfs_resource_cleanup(resource);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu, since there is no other updater anymore;
	 * genl is already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_debugfs_cleanup();

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	pr_info("module cleanup done.\n");
}

static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct completion_work {
	struct drbd_work w;
	struct completion done;
};

static int w_complete(struct drbd_work *w, int cancel)
{
	struct completion_work *completion_work =
		container_of(w, struct completion_work, w);

	complete(&completion_work->done);
	return 0;
}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
	struct completion_work completion_work;

	completion_work.w.cb = w_complete;
	init_completion(&completion_work.done);
	drbd_queue_work(work_queue, &completion_work.w);
	wait_for_completion(&completion_work.done);
}

struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}

struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}
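
/*
 * Illustrative sketch (not upstream code): both lookups above return
 * their object with an extra kref held, so every successful call must
 * be paired with the matching kref_put():
 *
 *	struct drbd_resource *resource = drbd_find_resource("r0");
 *	if (resource) {
 *		// ... use resource under its own locking rules ...
 *		kref_put(&resource->kref, drbd_destroy_resource);
 *	}
 */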

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_shash(connection->csums_tfm);
	crypto_free_shash(connection->verify_tfm);
	crypto_free_shash(connection->cram_hmac_tfm);
	crypto_free_shash(connection->integrity_tfm);
	crypto_free_shash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err == -EOVERFLOW) {
			/* So what. mask it out. */
			cpumask_var_t tmp_cpu_mask;
			if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
				cpumask_setall(tmp_cpu_mask);
				cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
				drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
					res_opts->cpu_mask,
					strlen(res_opts->cpu_mask) > 12 ? "..." : "",
					nr_cpu_ids);
				free_cpumask_var(tmp_cpu_mask);
				err = 0;
			}
		}
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->ack_receiver.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
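
/*
 * Illustrative note (not upstream code): res_opts->cpu_mask is an ASCII
 * hex bitmap in the format accepted by bitmap_parse().  For example, a
 * hypothetical mask of "3" would pin the resource's receiver,
 * ack_receiver and worker threads to CPUs 0 and 1; an empty mask lets
 * drbd_calc_cpu_mask() pick a CPU instead.
 */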

struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	resource->write_ordering = WO_BDEV_FLUSH;
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	mutex_init(&resource->adm_mutex);
	spin_lock_init(&resource->req_lock);
	drbd_debugfs_resource_add(resource);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}

/* caller must be under adm_mutex */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
	connection->receiver.connection = connection;
	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
	connection->worker.connection = connection;
	drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
	connection->ack_receiver.connection = connection;

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	drbd_debugfs_connection_add(connection);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}

void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	memset(connection, 0xfc, sizeof(*connection));
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use the "drbd%u_submit" name with the minor */
	device->submit.wq =
		alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}

enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
	struct drbd_resource *resource = adm_ctx->resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	int vnr = adm_ctx->volume;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_OR_VOLUME_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue(NUMA_NO_NODE);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	blk_queue_write_cache(q, true, true);
	/* Setting the max_hw_sectors to an odd value of 8 KiB here
	 * triggers a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);

	device->md_io.page = alloc_page(GFP_KERNEL);
	if (!device->md_io.page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_no_minor_idr;
	}
	kref_get(&device->kref);

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	INIT_LIST_HEAD(&device->pending_bitmap_io);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC)
				err = ERR_INVALID_REQUEST;
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
		INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS) {
		for_each_peer_device(peer_device, device)
			drbd_connected(peer_device);
	}

	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_add(peer_device);
	drbd_debugfs_device_add(device);
	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_remove(&connection->peer_devices, vnr);
		if (peer_device)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io.page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}
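
/*
 * Illustrative note (not upstream code): on success the new device holds
 * one kref per place it is reachable from: the initial reference, the
 * global drbd_devices idr, the resource->devices idr, and one per
 * peer_device.  drbd_delete_device() below drops them in the matching
 * order, so a sketch of the lifetime is simply:
 *
 *	err = drbd_create_device(adm_ctx, minor);	// refs taken
 *	...
 *	drbd_delete_device(device);			// refs dropped
 */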

void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	struct drbd_peer_device *peer_device;

	/* move to free_peer_device() */
	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_debugfs_device_cleanup(device);
	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		kref_put(&device->kref, drbd_destroy_device);
	}
	idr_remove(&resource->devices, device->vnr);
	kref_put(&device->kref, drbd_destroy_device);
	idr_remove(&drbd_devices, device_to_minor(device));
	kref_put(&device->kref, drbd_destroy_device);
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_put(&device->kref, drbd_destroy_device);
}

static int __init drbd_init(void)
{
	int err;

	if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
		pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		pr_err("unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	/*
	 * allocate all necessary structs
	 */
	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	mutex_init(&resources_mutex);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		pr_err("unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_single("drbd", S_IFREG | 0444, NULL, drbd_seq_show);
	if (!drbd_proc) {
		pr_err("unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		pr_err("unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	drbd_debugfs_init();

	pr_info("initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	pr_info("%s\n", drbd_buildtag());
	pr_info("registered as block device major %d\n", DRBD_MAJOR);
	return 0;

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		pr_err("ran out of memory\n");
	else
		pr_err("initialization failure\n");
	return err;
}

static void drbd_free_one_sock(struct drbd_socket *ds)
{
	struct socket *s;
	mutex_lock(&ds->mutex);
	s = ds->socket;
	ds->socket = NULL;
	mutex_unlock(&ds->mutex);
	if (s) {
		/* so debugfs does not need to mutex_lock() */
		synchronize_rcu();
		kernel_sock_shutdown(s, SHUT_RDWR);
		sock_release(s);
	}
}

void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket)
		drbd_free_one_sock(&connection->data);
	if (connection->meta.socket)
		drbd_free_one_sock(&connection->meta);
}

/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
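
/*
 * Illustrative note (not upstream code): the reserved_u8 padding sizes
 * the struct to exactly one 4 KiB block.  Seven u64 members (la_size_sect,
 * the four uuid slots via UI_SIZE == 4, device_uuid, reserved_u64_1)
 * account for 7*8 bytes and the ten u32 members for 10*4 bytes, so
 *
 *	sizeof(struct meta_data_on_disk) == 4096
 *
 * which drbd_md_sync() asserts with BUILD_BUG_ON() below.
 */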

void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
		/* this was a try anyways ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = get_capacity(device->vdisk);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}
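
/*
 * Illustrative sketch (not upstream code) of the dirty/sync protocol:
 * any change to the in-core meta data is followed by
 * drbd_md_mark_dirty(), which arms a five-second timer; drbd_md_sync()
 * then writes the super block at most once per dirtying:
 *
 *	device->ldev->md.flags |= MDF_SOME_FLAG;  // hypothetical change
 *	drbd_md_mark_dirty(device);               // set MD_DIRTY, arm timer
 *	...
 *	drbd_md_sync(device);                     // writes only if still dirty
 */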

static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
	 * than 72 * 4k blocks total only increases the amount of history,
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;

	in_core->al_stripe_size_4k = al_stripe_size_4k;
	in_core->al_stripes = al_stripes;
	in_core->al_size_4k = al_size_4k;

	return 0;
err:
	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
			al_stripes, al_stripe_size_4k);
	return -EINVAL;
}

static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
	struct drbd_md *in_core = &bdev->md;
	s32 on_disk_al_sect;
	s32 on_disk_bm_sect;

	/* The on-disk size of the activity log, calculated from offsets, and
	 * the size of the activity log calculated from the stripe settings,
	 * should match.
	 * Though we could relax this a bit: it is ok, if the striped activity log
	 * fits in the available on-disk activity log size.
	 * Right now, that would break how resize is implemented.
	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
	 * of possible unused padding space in the on disk layout. */
	if (in_core->al_offset < 0) {
		if (in_core->bm_offset > in_core->al_offset)
			goto err;
		on_disk_al_sect = -in_core->al_offset;
		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
	} else {
		if (in_core->al_offset != MD_4kB_SECT)
			goto err;
		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
			goto err;

		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
	}

	/* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);

	return -EINVAL;
}


/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being
 * D_DISKLESS, even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);
	/* Even for (flexible or indexed) external meta data,
	 * initially restrict us to the 4k superblock for now.
	 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
	bdev->md.md_size_sect = 8;

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
				 REQ_OP_READ)) {
		/* NOTE: can't do normal error processing here as this is
		 * called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call
 * drbd_md_sync().
 */
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}

void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}

void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}

void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}
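
/*
 * Illustrative sketch (not upstream code) of the UUID rotation performed
 * above, with C = current, B = bitmap:
 *
 *	before:  C = X   B = 0
 *	after:   C = Y   B = X		(Y is freshly random)
 *
 * On the next connect, a peer still carrying X as its current UUID can
 * be brought up to date by an incremental resync based on the bitmap.
 */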

void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}

/**
 * drbd_bmio_set_n_write() - Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 * @device:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
{
	int rv = -EIO;

	drbd_md_set_flag(device, MDF_FULL_SYNC);
	drbd_md_sync(device);
	drbd_bm_set_all(device);

	rv = drbd_bm_write(device);

	if (!rv) {
		drbd_md_clear_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 * @device:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
{
	drbd_resume_al(device);
	drbd_bm_clear_all(device);
	return drbd_bm_write(device);
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, bm_io_work.w);
	struct bm_io_work *work = &device->bm_io_work;
	int rv = -EIO;

	if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
		int cnt = atomic_read(&device->ap_bio_cnt);
		if (cnt)
			drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
					cnt, work->why);
	}

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap flags
 *
 * While IO on the bitmap happens we freeze application IO, thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 *
 * Its worker function encloses the call of io_fn() by get_ldev() and
 * put_ldev().
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	/* don't wait for pending application IO if the caller indicates that
	 * application IO does not conflict anyways. */
	if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap flags
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	/* Only suspend io, if some operation is supposed to be locked out */
	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if (do_suspend_io)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if (do_suspend_io)
		drbd_resume_io(device);

	return rv;
}
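
/*
 * Illustrative note (not upstream code): the two entry points split by
 * calling context.  From the worker thread, queue the IO; from anywhere
 * else, run it synchronously (callback names below are hypothetical):
 *
 *	// in worker context:
 *	drbd_queue_bitmap_io(device, io_fn, done_fn, "why", flags);
 *
 *	// outside worker context:
 *	rv = drbd_bitmap_io(device, io_fn, "why", flags);
 */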

void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, md_sync_timer);
	drbd_device_post_work(device, MD_SYNC);
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_WSAME]		= "WriteSame",
		[P_TRIM]		= "Trim",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]		= "Superseded",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE]	= "protocol_update",
		[P_RS_THIN_REQ]		= "rs_thin_req",
		[P_RS_DEALLOCATED]	= "rs_deallocated",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
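
/*
 * Illustrative note (not upstream code): nc->timeout is in units of
 * 0.1 seconds.  For example, hypothetical settings of timeout = 60 and
 * ko_count = 7 make the wait above compute
 *
 *	timeout = 60 * HZ / 10 * 7	== 42 seconds in jiffies
 *
 * while ko_count == 0 falls back to MAX_SCHEDULE_TIMEOUT.
 */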

void lock_all_resources(void)
{
	struct drbd_resource *resource;
	int __maybe_unused i = 0;

	mutex_lock(&resources_mutex);
	local_irq_disable();
	for_each_resource(resource, &drbd_resources)
		spin_lock_nested(&resource->req_lock, i++);
}

void unlock_all_resources(void)
{
	struct drbd_resource *resource;

	for_each_resource(resource, &drbd_resources)
		spin_unlock(&resource->req_lock);
	local_irq_enable();
	mutex_unlock(&resources_mutex);
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801
#define FAULT_RANDOM_ADD 479001701
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(drbd_fault_devs == 0 ||
			((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));

	if (ret) {
		drbd_fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif
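
/*
 * Illustrative note (not upstream code): faults are steered via the
 * module parameters declared near the top of this file.  A hypothetical
 * session failing roughly 1% of the enabled fault sites on minor 0:
 *
 *	# echo 16 > /sys/module/drbd/parameters/enable_faults  (bitmask of
 *	#                            fault types, e.g. bit 4 = data write)
 *	# echo 1  > /sys/module/drbd/parameters/fault_rate     (percent)
 *	# echo 1  > /sys/module/drbd/parameters/fault_devs     (bitmask of minors)
 */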

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	 * git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);