linux/drivers/block/drbd/drbd_nl.c
   1/*
   2   drbd_nl.c
   3
   4   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
   5
   6   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   7   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   8   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   9
  10   drbd is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation; either version 2, or (at your option)
  13   any later version.
  14
  15   drbd is distributed in the hope that it will be useful,
  16   but WITHOUT ANY WARRANTY; without even the implied warranty of
  17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18   GNU General Public License for more details.
  19
  20   You should have received a copy of the GNU General Public License
  21   along with drbd; see the file COPYING.  If not, write to
  22   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  23
  24 */
  25
  26#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
  27
  28#include <linux/module.h>
  29#include <linux/drbd.h>
  30#include <linux/in.h>
  31#include <linux/fs.h>
  32#include <linux/file.h>
  33#include <linux/slab.h>
  34#include <linux/blkpg.h>
  35#include <linux/cpumask.h>
  36#include "drbd_int.h"
  37#include "drbd_protocol.h"
  38#include "drbd_req.h"
  39#include "drbd_state_change.h"
  40#include <asm/unaligned.h>
  41#include <linux/drbd_limits.h>
  42#include <linux/kthread.h>
  43
  44#include <net/genetlink.h>
  45
  46/* .doit */
  47// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
  48// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
  49
  50int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
  51int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
  52
  53int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
  54int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
  55int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
  56
  57int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
  58int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
  59int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
  60int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
  61int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
  62int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
  63int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
  64int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
  65int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
  66int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
  67int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
  68int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
  69int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
  70int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
  71int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
  72int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
  73int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
  74int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
  75int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
  76int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
  77/* .dumpit */
  78int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
  79int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
  80int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
  81int drbd_adm_dump_devices_done(struct netlink_callback *cb);
  82int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
  83int drbd_adm_dump_connections_done(struct netlink_callback *cb);
  84int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
  85int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
  86int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
  87
  88#include <linux/drbd_genl_api.h>
  89#include "drbd_nla.h"
  90#include <linux/genl_magic_func.h>
  91
  92static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
  93static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
  94
  95DEFINE_MUTEX(notification_mutex);
  96
  97/* used blkdev_get_by_path, to claim our meta data device(s) */
  98static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
  99
 100static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
 101{
 102        genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
 103        if (genlmsg_reply(skb, info))
 104                pr_err("error sending genl reply\n");
 105}
 106
  107/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
  108 * reason it could fail would be no space in the skb, and there are 4k available. */
 109static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
 110{
 111        struct nlattr *nla;
 112        int err = -EMSGSIZE;
 113
 114        if (!info || !info[0])
 115                return 0;
 116
 117        nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
 118        if (!nla)
 119                return err;
 120
 121        err = nla_put_string(skb, T_info_text, info);
 122        if (err) {
 123                nla_nest_cancel(skb, nla);
 124                return err;
 125        } else
 126                nla_nest_end(skb, nla);
 127        return 0;
 128}
 129
 130/* This would be a good candidate for a "pre_doit" hook,
 131 * and per-family private info->pointers.
 132 * But we need to stay compatible with older kernels.
 133 * If it returns successfully, adm_ctx members are valid.
 134 *
 135 * At this point, we still rely on the global genl_lock().
 136 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 137 * to add additional synchronization against object destruction/modification.
 138 */
 139#define DRBD_ADM_NEED_MINOR     1
 140#define DRBD_ADM_NEED_RESOURCE  2
 141#define DRBD_ADM_NEED_CONNECTION 4
 142static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
 143        struct sk_buff *skb, struct genl_info *info, unsigned flags)
 144{
 145        struct drbd_genlmsghdr *d_in = info->userhdr;
 146        const u8 cmd = info->genlhdr->cmd;
 147        int err;
 148
 149        memset(adm_ctx, 0, sizeof(*adm_ctx));
 150
 151        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
 152        if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
 153               return -EPERM;
 154
 155        adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 156        if (!adm_ctx->reply_skb) {
 157                err = -ENOMEM;
 158                goto fail;
 159        }
 160
 161        adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
 162                                        info, &drbd_genl_family, 0, cmd);
  163        /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
  164         * but check anyway */
 165        if (!adm_ctx->reply_dh) {
 166                err = -ENOMEM;
 167                goto fail;
 168        }
 169
 170        adm_ctx->reply_dh->minor = d_in->minor;
 171        adm_ctx->reply_dh->ret_code = NO_ERROR;
 172
 173        adm_ctx->volume = VOLUME_UNSPECIFIED;
 174        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
 175                struct nlattr *nla;
 176                /* parse and validate only */
 177                err = drbd_cfg_context_from_attrs(NULL, info);
 178                if (err)
 179                        goto fail;
 180
 181                /* It was present, and valid,
 182                 * copy it over to the reply skb. */
 183                err = nla_put_nohdr(adm_ctx->reply_skb,
 184                                info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
 185                                info->attrs[DRBD_NLA_CFG_CONTEXT]);
 186                if (err)
 187                        goto fail;
 188
 189                /* and assign stuff to the adm_ctx */
 190                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
 191                if (nla)
 192                        adm_ctx->volume = nla_get_u32(nla);
 193                nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
 194                if (nla)
 195                        adm_ctx->resource_name = nla_data(nla);
 196                adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
 197                adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
 198                if ((adm_ctx->my_addr &&
 199                     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
 200                    (adm_ctx->peer_addr &&
 201                     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
 202                        err = -EINVAL;
 203                        goto fail;
 204                }
 205        }
 206
 207        adm_ctx->minor = d_in->minor;
 208        adm_ctx->device = minor_to_device(d_in->minor);
 209
 210        /* We are protected by the global genl_lock().
 211         * But we may explicitly drop it/retake it in drbd_adm_set_role(),
 212         * so make sure this object stays around. */
 213        if (adm_ctx->device)
 214                kref_get(&adm_ctx->device->kref);
 215
 216        if (adm_ctx->resource_name) {
 217                adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
 218        }
 219
 220        if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
 221                drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
 222                return ERR_MINOR_INVALID;
 223        }
 224        if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
 225                drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
 226                if (adm_ctx->resource_name)
 227                        return ERR_RES_NOT_KNOWN;
 228                return ERR_INVALID_REQUEST;
 229        }
 230
 231        if (flags & DRBD_ADM_NEED_CONNECTION) {
 232                if (adm_ctx->resource) {
 233                        drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
 234                        return ERR_INVALID_REQUEST;
 235                }
 236                if (adm_ctx->device) {
 237                        drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
 238                        return ERR_INVALID_REQUEST;
 239                }
 240                if (adm_ctx->my_addr && adm_ctx->peer_addr)
 241                        adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
 242                                                          nla_len(adm_ctx->my_addr),
 243                                                          nla_data(adm_ctx->peer_addr),
 244                                                          nla_len(adm_ctx->peer_addr));
 245                if (!adm_ctx->connection) {
 246                        drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
 247                        return ERR_INVALID_REQUEST;
 248                }
 249        }
 250
 251        /* some more paranoia, if the request was over-determined */
 252        if (adm_ctx->device && adm_ctx->resource &&
 253            adm_ctx->device->resource != adm_ctx->resource) {
 254                pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
 255                                adm_ctx->minor, adm_ctx->resource->name,
 256                                adm_ctx->device->resource->name);
 257                drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
 258                return ERR_INVALID_REQUEST;
 259        }
 260        if (adm_ctx->device &&
 261            adm_ctx->volume != VOLUME_UNSPECIFIED &&
 262            adm_ctx->volume != adm_ctx->device->vnr) {
 263                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
 264                                adm_ctx->minor, adm_ctx->volume,
 265                                adm_ctx->device->vnr,
 266                                adm_ctx->device->resource->name);
 267                drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
 268                return ERR_INVALID_REQUEST;
 269        }
 270
 271        /* still, provide adm_ctx->resource always, if possible. */
 272        if (!adm_ctx->resource) {
 273                adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
 274                        : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
 275                if (adm_ctx->resource)
 276                        kref_get(&adm_ctx->resource->kref);
 277        }
 278
 279        return NO_ERROR;
 280
 281fail:
 282        nlmsg_free(adm_ctx->reply_skb);
 283        adm_ctx->reply_skb = NULL;
 284        return err;
 285}
 286
 287static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
 288        struct genl_info *info, int retcode)
 289{
 290        if (adm_ctx->device) {
 291                kref_put(&adm_ctx->device->kref, drbd_destroy_device);
 292                adm_ctx->device = NULL;
 293        }
 294        if (adm_ctx->connection) {
 295                kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
 296                adm_ctx->connection = NULL;
 297        }
 298        if (adm_ctx->resource) {
 299                kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
 300                adm_ctx->resource = NULL;
 301        }
 302
 303        if (!adm_ctx->reply_skb)
 304                return -ENOMEM;
 305
 306        adm_ctx->reply_dh->ret_code = retcode;
 307        drbd_adm_send_reply(adm_ctx->reply_skb, info);
 308        return 0;
 309}
 310
 311static void setup_khelper_env(struct drbd_connection *connection, char **envp)
 312{
 313        char *afs;
 314
 315        /* FIXME: A future version will not allow this case. */
 316        if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
 317                return;
 318
 319        switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
 320        case AF_INET6:
 321                afs = "ipv6";
 322                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
 323                         &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
 324                break;
 325        case AF_INET:
 326                afs = "ipv4";
 327                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
 328                         &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
 329                break;
 330        default:
 331                afs = "ssocks";
 332                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
 333                         &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
 334        }
 335        snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
 336}
 337
 338int drbd_khelper(struct drbd_device *device, char *cmd)
 339{
 340        char *envp[] = { "HOME=/",
 341                        "TERM=linux",
 342                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 343                         (char[20]) { }, /* address family */
 344                         (char[60]) { }, /* address */
 345                        NULL };
 346        char mb[14];
 347        char *argv[] = {usermode_helper, cmd, mb, NULL };
 348        struct drbd_connection *connection = first_peer_device(device)->connection;
 349        struct sib_info sib;
 350        int ret;
 351
 352        if (current == connection->worker.task)
 353                set_bit(CALLBACK_PENDING, &connection->flags);
 354
 355        snprintf(mb, 14, "minor-%d", device_to_minor(device));
 356        setup_khelper_env(connection, envp);
 357
 358        /* The helper may take some time.
 359         * write out any unsynced meta data changes now */
 360        drbd_md_sync(device);
 361
 362        drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
 363        sib.sib_reason = SIB_HELPER_PRE;
 364        sib.helper_name = cmd;
 365        drbd_bcast_event(device, &sib);
 366        notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
 367        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
 368        if (ret)
 369                drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
 370                                usermode_helper, cmd, mb,
 371                                (ret >> 8) & 0xff, ret);
 372        else
 373                drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
 374                                usermode_helper, cmd, mb,
 375                                (ret >> 8) & 0xff, ret);
 376        sib.sib_reason = SIB_HELPER_POST;
 377        sib.helper_exit_code = ret;
 378        drbd_bcast_event(device, &sib);
 379        notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);
 380
 381        if (current == connection->worker.task)
 382                clear_bit(CALLBACK_PENDING, &connection->flags);
 383
 384        if (ret < 0) /* Ignore any ERRNOs we got. */
 385                ret = 0;
 386
 387        return ret;
 388}
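/* A sketch of a typical invocation (hypothetical helper path and minor number;
 * only the argv/envp layout is taken from the code above): with
 * usermode_helper set to e.g. "/sbin/drbdadm", cmd "fence-peer" and minor 3,
 * the helper runs as
 *     /sbin/drbdadm fence-peer minor-3
 * with HOME, TERM and PATH as listed above, plus DRBD_PEER_AF and
 * DRBD_PEER_ADDRESS filled in by setup_khelper_env() when the connection has
 * addresses.  The raw wait-for-process status is returned, so callers extract
 * the helper's exit code via (ret >> 8) & 0xff. */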
 389
 390enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
 391{
 392        char *envp[] = { "HOME=/",
 393                        "TERM=linux",
 394                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 395                         (char[20]) { }, /* address family */
 396                         (char[60]) { }, /* address */
 397                        NULL };
 398        char *resource_name = connection->resource->name;
 399        char *argv[] = {usermode_helper, cmd, resource_name, NULL };
 400        int ret;
 401
 402        setup_khelper_env(connection, envp);
 403        conn_md_sync(connection);
 404
 405        drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
 406        /* TODO: conn_bcast_event() ?? */
 407        notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
 408
 409        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
 410        if (ret)
 411                drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
 412                          usermode_helper, cmd, resource_name,
 413                          (ret >> 8) & 0xff, ret);
 414        else
 415                drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
 416                          usermode_helper, cmd, resource_name,
 417                          (ret >> 8) & 0xff, ret);
 418        /* TODO: conn_bcast_event() ?? */
 419        notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
 420
 421        if (ret < 0) /* Ignore any ERRNOs we got. */
 422                ret = 0;
 423
 424        return ret;
 425}
 426
 427static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
 428{
 429        enum drbd_fencing_p fp = FP_NOT_AVAIL;
 430        struct drbd_peer_device *peer_device;
 431        int vnr;
 432
 433        rcu_read_lock();
 434        idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 435                struct drbd_device *device = peer_device->device;
 436                if (get_ldev_if_state(device, D_CONSISTENT)) {
 437                        struct disk_conf *disk_conf =
 438                                rcu_dereference(peer_device->device->ldev->disk_conf);
 439                        fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
 440                        put_ldev(device);
 441                }
 442        }
 443        rcu_read_unlock();
 444
 445        return fp;
 446}
 447
  448static bool resource_is_suspended(struct drbd_resource *resource)
 449{
 450        return resource->susp || resource->susp_fen || resource->susp_nod;
 451}
 452
 453bool conn_try_outdate_peer(struct drbd_connection *connection)
 454{
 455        struct drbd_resource * const resource = connection->resource;
 456        unsigned int connect_cnt;
 457        union drbd_state mask = { };
 458        union drbd_state val = { };
 459        enum drbd_fencing_p fp;
 460        char *ex_to_string;
 461        int r;
 462
 463        spin_lock_irq(&resource->req_lock);
 464        if (connection->cstate >= C_WF_REPORT_PARAMS) {
 465                drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
 466                spin_unlock_irq(&resource->req_lock);
 467                return false;
 468        }
 469
 470        connect_cnt = connection->connect_cnt;
 471        spin_unlock_irq(&resource->req_lock);
 472
 473        fp = highest_fencing_policy(connection);
 474        switch (fp) {
 475        case FP_NOT_AVAIL:
 476                drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
 477                spin_lock_irq(&resource->req_lock);
 478                if (connection->cstate < C_WF_REPORT_PARAMS) {
 479                        _conn_request_state(connection,
 480                                            (union drbd_state) { { .susp_fen = 1 } },
 481                                            (union drbd_state) { { .susp_fen = 0 } },
 482                                            CS_VERBOSE | CS_HARD | CS_DC_SUSP);
 483                        /* We are no longer suspended due to the fencing policy.
 484                         * We may still be suspended due to the on-no-data-accessible policy.
 485                         * If that was OND_IO_ERROR, fail pending requests. */
  486                        if (!resource_is_suspended(resource))
 487                                _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
 488                }
 489                /* Else: in case we raced with a connection handshake,
 490                 * let the handshake figure out if we maybe can RESEND,
 491                 * and do not resume/fail pending requests here.
 492                 * Worst case is we stay suspended for now, which may be
 493                 * resolved by either re-establishing the replication link, or
 494                 * the next link failure, or eventually the administrator.  */
 495                spin_unlock_irq(&resource->req_lock);
 496                return false;
 497
 498        case FP_DONT_CARE:
 499                return true;
 500        default: ;
 501        }
 502
 503        r = conn_khelper(connection, "fence-peer");
 504
 505        switch ((r>>8) & 0xff) {
 506        case P_INCONSISTENT: /* peer is inconsistent */
 507                ex_to_string = "peer is inconsistent or worse";
 508                mask.pdsk = D_MASK;
 509                val.pdsk = D_INCONSISTENT;
 510                break;
 511        case P_OUTDATED: /* peer got outdated, or was already outdated */
 512                ex_to_string = "peer was fenced";
 513                mask.pdsk = D_MASK;
 514                val.pdsk = D_OUTDATED;
 515                break;
 516        case P_DOWN: /* peer was down */
 517                if (conn_highest_disk(connection) == D_UP_TO_DATE) {
 518                        /* we will(have) create(d) a new UUID anyways... */
 519                        ex_to_string = "peer is unreachable, assumed to be dead";
 520                        mask.pdsk = D_MASK;
 521                        val.pdsk = D_OUTDATED;
 522                } else {
 523                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
 524                }
 525                break;
 526        case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
 527                 * This is useful when an unconnected R_SECONDARY is asked to
 528                 * become R_PRIMARY, but finds the other peer being active. */
 529                ex_to_string = "peer is active";
 530                drbd_warn(connection, "Peer is primary, outdating myself.\n");
 531                mask.disk = D_MASK;
 532                val.disk = D_OUTDATED;
 533                break;
 534        case P_FENCING:
 535                /* THINK: do we need to handle this
 536                 * like case 4, or more like case 5? */
 537                if (fp != FP_STONITH)
 538                        drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
 539                ex_to_string = "peer was stonithed";
 540                mask.pdsk = D_MASK;
 541                val.pdsk = D_OUTDATED;
 542                break;
 543        default:
 544                /* The script is broken ... */
 545                drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
 546                return false; /* Eventually leave IO frozen */
 547        }
 548
 549        drbd_info(connection, "fence-peer helper returned %d (%s)\n",
 550                  (r>>8) & 0xff, ex_to_string);
 551
 552        /* Not using
 553           conn_request_state(connection, mask, val, CS_VERBOSE);
  554           here, because we might have been able to re-establish the connection in the
 555           meantime. */
 556        spin_lock_irq(&resource->req_lock);
 557        if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
 558                if (connection->connect_cnt != connect_cnt)
  559                        /* In case the connection was established and dropped
 560                           while the fence-peer handler was running, ignore it */
 561                        drbd_info(connection, "Ignoring fence-peer exit code\n");
 562                else
 563                        _conn_request_state(connection, mask, val, CS_VERBOSE);
 564        }
 565        spin_unlock_irq(&resource->req_lock);
 566
 567        return conn_highest_pdsk(connection) <= D_OUTDATED;
 568}
 569
 570static int _try_outdate_peer_async(void *data)
 571{
 572        struct drbd_connection *connection = (struct drbd_connection *)data;
 573
 574        conn_try_outdate_peer(connection);
 575
 576        kref_put(&connection->kref, drbd_destroy_connection);
 577        return 0;
 578}
 579
 580void conn_try_outdate_peer_async(struct drbd_connection *connection)
 581{
 582        struct task_struct *opa;
 583
 584        kref_get(&connection->kref);
 585        /* We may just have force_sig()'ed this thread
 586         * to get it out of some blocking network function.
 587         * Clear signals; otherwise kthread_run(), which internally uses
  588         * wait_for_completion_killable(), will mistake our pending signal
 589         * for a new fatal signal and fail. */
 590        flush_signals(current);
 591        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
 592        if (IS_ERR(opa)) {
 593                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
 594                kref_put(&connection->kref, drbd_destroy_connection);
 595        }
 596}
 597
 598enum drbd_state_rv
 599drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
 600{
 601        struct drbd_peer_device *const peer_device = first_peer_device(device);
 602        struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 603        const int max_tries = 4;
 604        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
 605        struct net_conf *nc;
 606        int try = 0;
 607        int forced = 0;
 608        union drbd_state mask, val;
 609
 610        if (new_role == R_PRIMARY) {
 611                struct drbd_connection *connection;
 612
 613                /* Detect dead peers as soon as possible.  */
 614
 615                rcu_read_lock();
 616                for_each_connection(connection, device->resource)
 617                        request_ping(connection);
 618                rcu_read_unlock();
 619        }
 620
 621        mutex_lock(device->state_mutex);
 622
 623        mask.i = 0; mask.role = R_MASK;
 624        val.i  = 0; val.role  = new_role;
 625
 626        while (try++ < max_tries) {
 627                rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
 628
 629                /* in case we first succeeded to outdate,
 630                 * but now suddenly could establish a connection */
 631                if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
 632                        val.pdsk = 0;
 633                        mask.pdsk = 0;
 634                        continue;
 635                }
 636
 637                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
 638                    (device->state.disk < D_UP_TO_DATE &&
 639                     device->state.disk >= D_INCONSISTENT)) {
 640                        mask.disk = D_MASK;
 641                        val.disk  = D_UP_TO_DATE;
 642                        forced = 1;
 643                        continue;
 644                }
 645
 646                if (rv == SS_NO_UP_TO_DATE_DISK &&
 647                    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
 648                        D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
 649
 650                        if (conn_try_outdate_peer(connection)) {
 651                                val.disk = D_UP_TO_DATE;
 652                                mask.disk = D_MASK;
 653                        }
 654                        continue;
 655                }
 656
 657                if (rv == SS_NOTHING_TO_DO)
 658                        goto out;
 659                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
 660                        if (!conn_try_outdate_peer(connection) && force) {
 661                                drbd_warn(device, "Forced into split brain situation!\n");
 662                                mask.pdsk = D_MASK;
 663                                val.pdsk  = D_OUTDATED;
 664
 665                        }
 666                        continue;
 667                }
 668                if (rv == SS_TWO_PRIMARIES) {
 669                        /* Maybe the peer is detected as dead very soon...
 670                           retry at most once more in this case. */
 671                        int timeo;
 672                        rcu_read_lock();
 673                        nc = rcu_dereference(connection->net_conf);
 674                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
 675                        rcu_read_unlock();
 676                        schedule_timeout_interruptible(timeo);
 677                        if (try < max_tries)
 678                                try = max_tries - 1;
 679                        continue;
 680                }
 681                if (rv < SS_SUCCESS) {
 682                        rv = _drbd_request_state(device, mask, val,
 683                                                CS_VERBOSE + CS_WAIT_COMPLETE);
 684                        if (rv < SS_SUCCESS)
 685                                goto out;
 686                }
 687                break;
 688        }
 689
 690        if (rv < SS_SUCCESS)
 691                goto out;
 692
 693        if (forced)
 694                drbd_warn(device, "Forced to consider local data as UpToDate!\n");
 695
 696        /* Wait until nothing is on the fly :) */
 697        wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
 698
 699        /* FIXME also wait for all pending P_BARRIER_ACK? */
 700
 701        if (new_role == R_SECONDARY) {
 702                if (get_ldev(device)) {
 703                        device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
 704                        put_ldev(device);
 705                }
 706        } else {
 707                mutex_lock(&device->resource->conf_update);
 708                nc = connection->net_conf;
 709                if (nc)
 710                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
 711                mutex_unlock(&device->resource->conf_update);
 712
 713                if (get_ldev(device)) {
 714                        if (((device->state.conn < C_CONNECTED ||
 715                               device->state.pdsk <= D_FAILED)
 716                              && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
 717                                drbd_uuid_new_current(device);
 718
 719                        device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
 720                        put_ldev(device);
 721                }
 722        }
 723
 724        /* writeout of activity log covered areas of the bitmap
 725         * to stable storage done in after state change already */
 726
 727        if (device->state.conn >= C_WF_REPORT_PARAMS) {
 728                /* if this was forced, we should consider sync */
 729                if (forced)
 730                        drbd_send_uuids(peer_device);
 731                drbd_send_current_state(peer_device);
 732        }
 733
 734        drbd_md_sync(device);
 735        set_disk_ro(device->vdisk, new_role == R_SECONDARY);
 736        kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
 737out:
 738        mutex_unlock(device->state_mutex);
 739        return rv;
 740}
 741
 742static const char *from_attrs_err_to_txt(int err)
 743{
 744        return  err == -ENOMSG ? "required attribute missing" :
 745                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
 746                err == -EEXIST ? "can not change invariant setting" :
 747                "invalid attribute value";
 748}
 749
 750int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
 751{
 752        struct drbd_config_context adm_ctx;
 753        struct set_role_parms parms;
 754        int err;
 755        enum drbd_ret_code retcode;
 756
 757        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 758        if (!adm_ctx.reply_skb)
 759                return retcode;
 760        if (retcode != NO_ERROR)
 761                goto out;
 762
 763        memset(&parms, 0, sizeof(parms));
 764        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
 765                err = set_role_parms_from_attrs(&parms, info);
 766                if (err) {
 767                        retcode = ERR_MANDATORY_TAG;
 768                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
 769                        goto out;
 770                }
 771        }
 772        genl_unlock();
 773        mutex_lock(&adm_ctx.resource->adm_mutex);
 774
 775        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
 776                retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
 777        else
 778                retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
 779
 780        mutex_unlock(&adm_ctx.resource->adm_mutex);
 781        genl_lock();
 782out:
 783        drbd_adm_finish(&adm_ctx, info, retcode);
 784        return 0;
 785}
 786
 787/* Initializes the md.*_offset members, so we are able to find
 788 * the on disk meta data.
 789 *
 790 * We currently have two possible layouts:
 791 * external:
 792 *   |----------- md_size_sect ------------------|
 793 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 794 *   | al_offset == 8 |
 795 *   | bm_offset = al_offset + X      |
 796 *  ==> bitmap sectors = md_size_sect - bm_offset
 797 *
 798 * internal:
 799 *            |----------- md_size_sect ------------------|
 800 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 801 *                        | al_offset < 0 |
 802 *            | bm_offset = al_offset - Y |
 803 *  ==> bitmap sectors = Y = al_offset - bm_offset
 804 *
 805 *  Activity log size used to be fixed 32kB,
 806 *  but is about to become configurable.
 807 */
 808static void drbd_md_set_sector_offsets(struct drbd_device *device,
 809                                       struct drbd_backing_dev *bdev)
 810{
 811        sector_t md_size_sect = 0;
 812        unsigned int al_size_sect = bdev->md.al_size_4k * 8;
 813
 814        bdev->md.md_offset = drbd_md_ss(bdev);
 815
 816        switch (bdev->md.meta_dev_idx) {
 817        default:
 818                /* v07 style fixed size indexed meta data */
 819                bdev->md.md_size_sect = MD_128MB_SECT;
 820                bdev->md.al_offset = MD_4kB_SECT;
 821                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 822                break;
 823        case DRBD_MD_INDEX_FLEX_EXT:
 824                /* just occupy the full device; unit: sectors */
 825                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
 826                bdev->md.al_offset = MD_4kB_SECT;
 827                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 828                break;
 829        case DRBD_MD_INDEX_INTERNAL:
 830        case DRBD_MD_INDEX_FLEX_INT:
 831                /* al size is still fixed */
 832                bdev->md.al_offset = -al_size_sect;
  833                /* we need (slightly less than) ~ this many bitmap sectors: */
 834                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
 835                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
 836                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
 837                md_size_sect = ALIGN(md_size_sect, 8);
 838
 839                /* plus the "drbd meta data super block",
 840                 * and the activity log; */
 841                md_size_sect += MD_4kB_SECT + al_size_sect;
 842
 843                bdev->md.md_size_sect = md_size_sect;
 844                /* bitmap offset is adjusted by 'super' block size */
 845                bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
 846                break;
 847        }
 848}
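/* Worked example (a sketch with hypothetical numbers, assuming the historical
 * default 32 kB activity log, i.e. al_size_4k = 8, so al_size_sect = 64, and
 * MD_4kB_SECT = 8):
 *
 *   fixed-index external meta data: md_size_sect = MD_128MB_SECT,
 *       al_offset = 8, bm_offset = 8 + 64 = 72,
 *       both relative to the start of the meta data area.
 *
 *   internal meta data: offsets are relative to md_offset, which points at
 *       the super block near the end of the backing device, so
 *       al_offset = -64 and bm_offset = -md_size_sect + 8; md_size_sect
 *       itself grows with the backing device capacity as computed above. */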
 849
 850/* input size is expected to be in KB */
 851char *ppsize(char *buf, unsigned long long size)
 852{
 853        /* Needs 9 bytes at max including trailing NUL:
 854         * -1ULL ==> "16384 EB" */
 855        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
 856        int base = 0;
 857        while (size >= 10000 && base < sizeof(units)-1) {
 858                /* shift + round */
 859                size = (size >> 10) + !!(size & (1<<9));
 860                base++;
 861        }
 862        sprintf(buf, "%u %cB", (unsigned)size, units[base]);
 863
 864        return buf;
 865}
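/* Illustrative values: ppsize(buf, 4) yields "4 KB" and
 * ppsize(buf, 16777216) yields "16 GB"; the size is repeatedly divided by
 * 1024 (with rounding) until it drops below 10000, then printed with the
 * matching unit. */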
 866
 867/* there is still a theoretical deadlock when called from receiver
  868 * on a D_INCONSISTENT R_PRIMARY:
 869 *  remote READ does inc_ap_bio, receiver would need to receive answer
 870 *  packet from remote to dec_ap_bio again.
 871 *  receiver receive_sizes(), comes here,
 872 *  waits for ap_bio_cnt == 0. -> deadlock.
 873 * but this cannot happen, actually, because:
 874 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 875 *  (not connected, or bad/no disk on peer):
 876 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 877 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 878 *  peer may not initiate a resize.
 879 */
 880/* Note these are not to be confused with
 881 * drbd_adm_suspend_io/drbd_adm_resume_io,
 882 * which are (sub) state changes triggered by admin (drbdsetup),
 883 * and can be long lived.
  884 * This changes a device->flag, is triggered by drbd internals,
 885 * and should be short-lived. */
 886/* It needs to be a counter, since multiple threads might
 887   independently suspend and resume IO. */
 888void drbd_suspend_io(struct drbd_device *device)
 889{
 890        atomic_inc(&device->suspend_cnt);
 891        if (drbd_suspended(device))
 892                return;
 893        wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
 894}
 895
 896void drbd_resume_io(struct drbd_device *device)
 897{
 898        if (atomic_dec_and_test(&device->suspend_cnt))
 899                wake_up(&device->misc_wait);
 900}
 901
 902/**
 903 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 904 * @device:     DRBD device.
 905 *
 906 * Returns 0 on success, negative return values indicate errors.
 907 * You should call drbd_md_sync() after calling this function.
 908 */
 909enum determine_dev_size
 910drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
 911{
 912        struct md_offsets_and_sizes {
 913                u64 last_agreed_sect;
 914                u64 md_offset;
 915                s32 al_offset;
 916                s32 bm_offset;
 917                u32 md_size_sect;
 918
 919                u32 al_stripes;
 920                u32 al_stripe_size_4k;
 921        } prev;
 922        sector_t u_size, size;
 923        struct drbd_md *md = &device->ldev->md;
 924        char ppb[10];
 925        void *buffer;
 926
 927        int md_moved, la_size_changed;
 928        enum determine_dev_size rv = DS_UNCHANGED;
 929
 930        /* We may change the on-disk offsets of our meta data below.  Lock out
 931         * anything that may cause meta data IO, to avoid acting on incomplete
 932         * layout changes or scribbling over meta data that is in the process
 933         * of being moved.
 934         *
 935         * Move is not exactly correct, btw, currently we have all our meta
 936         * data in core memory, to "move" it we just write it all out, there
 937         * are no reads. */
 938        drbd_suspend_io(device);
 939        buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
 940        if (!buffer) {
 941                drbd_resume_io(device);
 942                return DS_ERROR;
 943        }
 944
 945        /* remember current offset and sizes */
 946        prev.last_agreed_sect = md->la_size_sect;
 947        prev.md_offset = md->md_offset;
 948        prev.al_offset = md->al_offset;
 949        prev.bm_offset = md->bm_offset;
 950        prev.md_size_sect = md->md_size_sect;
 951        prev.al_stripes = md->al_stripes;
 952        prev.al_stripe_size_4k = md->al_stripe_size_4k;
 953
 954        if (rs) {
 955                /* rs is non NULL if we should change the AL layout only */
 956                md->al_stripes = rs->al_stripes;
 957                md->al_stripe_size_4k = rs->al_stripe_size / 4;
 958                md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
 959        }
 960
 961        drbd_md_set_sector_offsets(device, device->ldev);
 962
 963        rcu_read_lock();
 964        u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
 965        rcu_read_unlock();
 966        size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
 967
 968        if (size < prev.last_agreed_sect) {
 969                if (rs && u_size == 0) {
 970                        /* Remove "rs &&" later. This check should always be active, but
 971                           right now the receiver expects the permissive behavior */
 972                        drbd_warn(device, "Implicit shrink not allowed. "
 973                                 "Use --size=%llus for explicit shrink.\n",
 974                                 (unsigned long long)size);
 975                        rv = DS_ERROR_SHRINK;
 976                }
 977                if (u_size > size)
 978                        rv = DS_ERROR_SPACE_MD;
 979                if (rv != DS_UNCHANGED)
 980                        goto err_out;
 981        }
 982
 983        if (drbd_get_capacity(device->this_bdev) != size ||
 984            drbd_bm_capacity(device) != size) {
 985                int err;
 986                err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
 987                if (unlikely(err)) {
 988                        /* currently there is only one error: ENOMEM! */
 989                        size = drbd_bm_capacity(device);
 990                        if (size == 0) {
 991                                drbd_err(device, "OUT OF MEMORY! "
 992                                    "Could not allocate bitmap!\n");
 993                        } else {
 994                                drbd_err(device, "BM resizing failed. "
 995                                    "Leaving size unchanged\n");
 996                        }
 997                        rv = DS_ERROR;
 998                }
 999                /* racy, see comments above. */
1000                drbd_set_my_capacity(device, size);
1001                md->la_size_sect = size;
1002                drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
1003                     (unsigned long long)size>>1);
1004        }
1005        if (rv <= DS_ERROR)
1006                goto err_out;
1007
1008        la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
1009
1010        md_moved = prev.md_offset    != md->md_offset
1011                || prev.md_size_sect != md->md_size_sect;
1012
1013        if (la_size_changed || md_moved || rs) {
1014                u32 prev_flags;
1015
1016                /* We do some synchronous IO below, which may take some time.
1017                 * Clear the timer, to avoid scary "timer expired!" messages,
1018                 * "Superblock" is written out at least twice below, anyways. */
1019                del_timer(&device->md_sync_timer);
1020
1021                /* We won't change the "al-extents" setting, we just may need
1022                 * to move the on-disk location of the activity log ringbuffer.
1023                 * Lock for transaction is good enough, it may well be "dirty"
1024                 * or even "starving". */
1025                wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));
1026
1027                /* mark current on-disk bitmap and activity log as unreliable */
1028                prev_flags = md->flags;
1029                md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
1030                drbd_md_write(device, buffer);
1031
1032                drbd_al_initialize(device, buffer);
1033
1034                drbd_info(device, "Writing the whole bitmap, %s\n",
1035                         la_size_changed && md_moved ? "size changed and md moved" :
1036                         la_size_changed ? "size changed" : "md moved");
1037                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
1038                drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
1039                               "size changed", BM_LOCKED_MASK);
1040
1041                /* on-disk bitmap and activity log is authoritative again
1042                 * (unless there was an IO error meanwhile...) */
1043                md->flags = prev_flags;
1044                drbd_md_write(device, buffer);
1045
1046                if (rs)
1047                        drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
1048                                  md->al_stripes, md->al_stripe_size_4k * 4);
1049        }
1050
1051        if (size > prev.last_agreed_sect)
1052                rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
1053        if (size < prev.last_agreed_sect)
1054                rv = DS_SHRUNK;
1055
1056        if (0) {
1057        err_out:
1058                /* restore previous offset and sizes */
1059                md->la_size_sect = prev.last_agreed_sect;
1060                md->md_offset = prev.md_offset;
1061                md->al_offset = prev.al_offset;
1062                md->bm_offset = prev.bm_offset;
1063                md->md_size_sect = prev.md_size_sect;
1064                md->al_stripes = prev.al_stripes;
1065                md->al_stripe_size_4k = prev.al_stripe_size_4k;
1066                md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
1067        }
1068        lc_unlock(device->act_log);
1069        wake_up(&device->al_wait);
1070        drbd_md_put_buffer(device);
1071        drbd_resume_io(device);
1072
1073        return rv;
1074}
1075
1076sector_t
1077drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
1078                  sector_t u_size, int assume_peer_has_space)
1079{
1080        sector_t p_size = device->p_size;   /* partner's disk size. */
1081        sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
1082        sector_t m_size; /* my size */
1083        sector_t size = 0;
1084
1085        m_size = drbd_get_max_capacity(bdev);
1086
1087        if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
1088                drbd_warn(device, "Resize while not connected was forced by the user!\n");
1089                p_size = m_size;
1090        }
1091
1092        if (p_size && m_size) {
1093                size = min_t(sector_t, p_size, m_size);
1094        } else {
1095                if (la_size_sect) {
1096                        size = la_size_sect;
1097                        if (m_size && m_size < size)
1098                                size = m_size;
1099                        if (p_size && p_size < size)
1100                                size = p_size;
1101                } else {
1102                        if (m_size)
1103                                size = m_size;
1104                        if (p_size)
1105                                size = p_size;
1106                }
1107        }
1108
1109        if (size == 0)
1110                drbd_err(device, "Both nodes diskless!\n");
1111
1112        if (u_size) {
1113                if (u_size > size)
1114                        drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
1115                            (unsigned long)u_size>>1, (unsigned long)size>>1);
1116                else
1117                        size = u_size;
1118        }
1119
1120        return size;
1121}
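/* Illustrative example (hypothetical sector counts): with a connected peer,
 * p_size = 800000 and m_size = 1000000 negotiate size = 800000 (the smaller
 * of the two).  A configured u_size = 500000 then shrinks that to 500000,
 * while u_size = 900000 is rejected as too big and the negotiated 800000 is
 * kept. */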
1122
1123/**
1124 * drbd_check_al_size() - Ensures that the AL is of the right size
1125 * @device:     DRBD device.
1126 *
1127 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
1128 * failed, and 0 on success. You should call drbd_md_sync() after you called
1129 * this function.
1130 */
1131static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1132{
1133        struct lru_cache *n, *t;
1134        struct lc_element *e;
1135        unsigned int in_use;
1136        int i;
1137
1138        if (device->act_log &&
1139            device->act_log->nr_elements == dc->al_extents)
1140                return 0;
1141
1142        in_use = 0;
1143        t = device->act_log;
1144        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
1145                dc->al_extents, sizeof(struct lc_element), 0);
1146
1147        if (n == NULL) {
1148                drbd_err(device, "Cannot allocate act_log lru!\n");
1149                return -ENOMEM;
1150        }
1151        spin_lock_irq(&device->al_lock);
1152        if (t) {
1153                for (i = 0; i < t->nr_elements; i++) {
1154                        e = lc_element_by_index(t, i);
1155                        if (e->refcnt)
1156                                drbd_err(device, "refcnt(%d)==%d\n",
1157                                    e->lc_number, e->refcnt);
1158                        in_use += e->refcnt;
1159                }
1160        }
1161        if (!in_use)
1162                device->act_log = n;
1163        spin_unlock_irq(&device->al_lock);
1164        if (in_use) {
1165                drbd_err(device, "Activity log still in use!\n");
1166                lc_destroy(n);
1167                return -EBUSY;
1168        } else {
1169                lc_destroy(t);
1170        }
 1171        drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
1172        return 0;
1173}
1174
1175static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
1176{
1177        q->limits.discard_granularity = granularity;
1178}
1179
1180static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
1181{
1182        /* when we introduced REQ_WRITE_SAME support, we also bumped
1183         * our maximum supported batch bio size used for discards. */
1184        if (connection->agreed_features & DRBD_FF_WSAME)
1185                return DRBD_MAX_BBIO_SECTORS;
1186        /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
1187        return AL_EXTENT_SIZE >> 9;
1188}
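/* Assuming the usual 4 MiB AL_EXTENT_SIZE (an assumption, the constant is not
 * defined in this file), the pre-WSAME limit above works out to
 * 4 MiB >> 9 = 8192 sectors, versus DRBD_MAX_BBIO_SECTORS once the peer has
 * negotiated DRBD_FF_WSAME. */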
1189
1190static void decide_on_discard_support(struct drbd_device *device,
1191                        struct request_queue *q,
1192                        struct request_queue *b,
1193                        bool discard_zeroes_if_aligned)
1194{
1195        /* q = drbd device queue (device->rq_queue)
1196         * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
1197         *     or NULL if diskless
1198         */
1199        struct drbd_connection *connection = first_peer_device(device)->connection;
1200        bool can_do = b ? blk_queue_discard(b) : true;
1201
1202        if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
1203                can_do = false;
1204                drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
1205        }
1206        if (can_do) {
1207                /* We don't care for the granularity, really.
1208                 * Stacking limits below should fix it for the local
1209                 * device.  Whether or not it is a suitable granularity
1210                 * on the remote device is not our problem, really. If
1211                 * you care, you need to use devices with similar
1212                 * topology on all peers. */
1213                blk_queue_discard_granularity(q, 512);
1214                q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
1215                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1216                q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
1217        } else {
1218                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1219                blk_queue_discard_granularity(q, 0);
1220                q->limits.max_discard_sectors = 0;
1221                q->limits.max_write_zeroes_sectors = 0;
1222        }
1223}
1224
1225static void fixup_discard_if_not_supported(struct request_queue *q)
1226{
1227        /* To avoid confusion, if this queue does not support discard, clear
1228         * max_discard_sectors, which is what lsblk -D reports to the user.
1229         * Older kernels got this wrong in "stack limits".
 1230         */
1231        if (!blk_queue_discard(q)) {
1232                blk_queue_max_discard_sectors(q, 0);
1233                blk_queue_discard_granularity(q, 0);
1234        }
1235}
1236
1237static void decide_on_write_same_support(struct drbd_device *device,
1238                        struct request_queue *q,
1239                        struct request_queue *b, struct o_qlim *o)
1240{
1241        struct drbd_peer_device *peer_device = first_peer_device(device);
1242        struct drbd_connection *connection = peer_device->connection;
1243        bool can_do = b ? b->limits.max_write_same_sectors : true;
1244
1245        if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
1246                can_do = false;
1247                drbd_info(peer_device, "peer does not support WRITE_SAME\n");
1248        }
1249
1250        if (o) {
1251                /* logical block size; queue_logical_block_size(NULL) is 512 */
1252                unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
1253                unsigned int me_lbs_b = queue_logical_block_size(b);
1254                unsigned int me_lbs = queue_logical_block_size(q);
1255
1256                if (me_lbs_b != me_lbs) {
1257                        drbd_warn(device,
1258                                "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
1259                                me_lbs, me_lbs_b);
1260                        /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
1261                        can_do = false;
1262                }
1263                if (me_lbs_b != peer_lbs) {
1264                        drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
1265                                me_lbs, peer_lbs);
1266                        if (can_do) {
1267                                drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
1268                                can_do = false;
1269                        }
1270                        me_lbs = max(me_lbs, me_lbs_b);
1271                        /* We cannot change the logical block size of an in-use queue.
1272                         * We can only hope that access happens to be properly aligned.
1273                         * If not, the peer will likely produce an IO error, and detach. */
1274                        if (peer_lbs > me_lbs) {
1275                                if (device->state.role != R_PRIMARY) {
1276                                        blk_queue_logical_block_size(q, peer_lbs);
1277                                        drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
1278                                } else {
1279                                        drbd_warn(peer_device,
1280                                                "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
1281                                                me_lbs, peer_lbs);
1282                                }
1283                        }
1284                }
1285                if (can_do && !o->write_same_capable) {
1286                        /* If we introduce an open-coded write-same loop on the receiving side,
1287                         * the peer would present itself as "capable". */
1288                        drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
1289                        can_do = false;
1290                }
1291        }
1292
1293        blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
1294}
1295
1296static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
1297                                   unsigned int max_bio_size, struct o_qlim *o)
1298{
1299        struct request_queue * const q = device->rq_queue;
1300        unsigned int max_hw_sectors = max_bio_size >> 9;
1301        unsigned int max_segments = 0;
1302        struct request_queue *b = NULL;
1303        struct disk_conf *dc;
1304        bool discard_zeroes_if_aligned = true;
1305
1306        if (bdev) {
1307                b = bdev->backing_bdev->bd_disk->queue;
1308
1309                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
1310                rcu_read_lock();
1311                dc = rcu_dereference(device->ldev->disk_conf);
1312                max_segments = dc->max_bio_bvecs;
1313                discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
1314                rcu_read_unlock();
1315
1316                blk_set_stacking_limits(&q->limits);
1317        }
1318
1319        blk_queue_max_hw_sectors(q, max_hw_sectors);
1320        /* This is the workaround for "bio would need to, but cannot, be split" */
1321        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1322        blk_queue_segment_boundary(q, PAGE_SIZE-1);
1323        decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
1324        decide_on_write_same_support(device, q, b, o);
1325
1326        if (b) {
1327                blk_queue_stack_limits(q, b);
1328
1329                if (q->backing_dev_info->ra_pages !=
1330                    b->backing_dev_info->ra_pages) {
1331                        drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1332                                 q->backing_dev_info->ra_pages,
1333                                 b->backing_dev_info->ra_pages);
1334                        q->backing_dev_info->ra_pages =
1335                                                b->backing_dev_info->ra_pages;
1336                }
1337        }
1338        fixup_discard_if_not_supported(q);
1339}
1340
1341void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
1342{
1343        unsigned int now, new, local, peer;
1344
1345        now = queue_max_hw_sectors(device->rq_queue) << 9;
1346        local = device->local_max_bio_size; /* Possibly stale last known value, from volatile memory */
1347        peer = device->peer_max_bio_size; /* Possibly stale last known value, from meta data */
1348
1349        if (bdev) {
1350                local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
1351                device->local_max_bio_size = local;
1352        }
1353        local = min(local, DRBD_MAX_BIO_SIZE);
1354
1355        /* We may ignore peer limits if the peer is modern enough:
1356           starting with 8.3.8, the peer can use multiple
1357           BIOs for a single peer_request. */
1358        if (device->state.conn >= C_WF_REPORT_PARAMS) {
1359                if (first_peer_device(device)->connection->agreed_pro_version < 94)
1360                        peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1361                        /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1362                else if (first_peer_device(device)->connection->agreed_pro_version == 94)
1363                        peer = DRBD_MAX_SIZE_H80_PACKET;
1364                else if (first_peer_device(device)->connection->agreed_pro_version < 100)
1365                        peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
1366                else
1367                        peer = DRBD_MAX_BIO_SIZE;
1368
1369                /* We may later detach and re-attach on a disconnected Primary.
1370         * Avoid letting this setting jump back in that case.
1371                 * We want to store what we know the peer DRBD can handle,
1372                 * not what the peer IO backend can handle. */
1373                if (peer > device->peer_max_bio_size)
1374                        device->peer_max_bio_size = peer;
1375        }
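            /* The effective max BIO size is what both the local backend
             * and the peer can handle. */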
1376        new = min(local, peer);
1377
1378        if (device->state.role == R_PRIMARY && new < now)
1379                drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1380
1381        if (new != now)
1382                drbd_info(device, "max BIO size = %u\n", new);
1383
1384        drbd_setup_queue_param(device, bdev, new, o);
1385}
1386
1387/* Starts the worker thread */
1388static void conn_reconfig_start(struct drbd_connection *connection)
1389{
1390        drbd_thread_start(&connection->worker);
1391        drbd_flush_workqueue(&connection->sender_work);
1392}
1393
1394/* if still unconfigured, stops worker again. */
1395static void conn_reconfig_done(struct drbd_connection *connection)
1396{
1397        bool stop_threads;
1398        spin_lock_irq(&connection->resource->req_lock);
1399        stop_threads = conn_all_vols_unconf(connection) &&
1400                connection->cstate == C_STANDALONE;
1401        spin_unlock_irq(&connection->resource->req_lock);
1402        if (stop_threads) {
1403                /* ack_receiver thread and ack_sender workqueue are implicitly
1404                 * stopped by receiver in conn_disconnect() */
1405                drbd_thread_stop(&connection->receiver);
1406                drbd_thread_stop(&connection->worker);
1407        }
1408}
1409
1410/* Make sure IO is suspended before calling this function. */
1411static void drbd_suspend_al(struct drbd_device *device)
1412{
1413        int s = 0;
1414
1415        if (!lc_try_lock(device->act_log)) {
1416                drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
1417                return;
1418        }
1419
1420        drbd_al_shrink(device);
1421        spin_lock_irq(&device->resource->req_lock);
1422        if (device->state.conn < C_CONNECTED)
1423                s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
1424        spin_unlock_irq(&device->resource->req_lock);
1425        lc_unlock(device->act_log);
1426
1427        if (s)
1428                drbd_info(device, "Suspended AL updates\n");
1429}
1430
1431
1432static bool should_set_defaults(struct genl_info *info)
1433{
1434        unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1435        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1436}
1437
1438static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
1439{
1440        /* This is limited by 16 bit "slot" numbers,
1441         * and by available on-disk context storage.
1442         *
1443         * Also (u16)~0 is special (denotes a "free" extent).
1444         *
1445         * One transaction occupies one 4kB on-disk block,
1446         * we have n such blocks in the on-disk ring buffer,
1447         * only n-1 of which are usable (the "current" transaction may still fail),
1448         * and each transaction carries context information for 919 slot numbers.
1449         *
1450         * 72 transaction blocks amount to more than 2**16 context slots,
1451         * so cap there first.
1452         */
1453        const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1454        const unsigned int sufficient_on_disk =
1455                (max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
1456                / AL_CONTEXT_PER_TRANSACTION;
1457
1458        unsigned int al_size_4k = bdev->md.al_size_4k;
1459
1460        if (al_size_4k > sufficient_on_disk)
1461                return max_al_nr;
1462
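            /* Otherwise the on-disk ring buffer is the limit: n-1 usable
             * transactions, each with AL_CONTEXT_PER_TRANSACTION context slots. */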
1463        return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
1464}
1465
1466static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1467{
1468        return  a->disk_barrier != b->disk_barrier ||
1469                a->disk_flushes != b->disk_flushes ||
1470                a->disk_drain != b->disk_drain;
1471}
1472
1473static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
1474                               struct drbd_backing_dev *nbc)
1475{
1476        struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
1477
1478        if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1479                disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1480        if (disk_conf->al_extents > drbd_al_extents_max(nbc))
1481                disk_conf->al_extents = drbd_al_extents_max(nbc);
1482
1483        if (!blk_queue_discard(q)) {
1484                if (disk_conf->rs_discard_granularity) {
1485                        disk_conf->rs_discard_granularity = 0; /* disable feature */
1486                        drbd_info(device, "rs_discard_granularity feature disabled\n");
1487                }
1488        }
1489
1490        if (disk_conf->rs_discard_granularity) {
1491                int orig_value = disk_conf->rs_discard_granularity;
1492                int remainder;
1493
1494                if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
1495                        disk_conf->rs_discard_granularity = q->limits.discard_granularity;
1496
1497                remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
1498                disk_conf->rs_discard_granularity += remainder;
1499
1500                if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
1501                        disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
1502
1503                if (disk_conf->rs_discard_granularity != orig_value)
1504                        drbd_info(device, "rs_discard_granularity changed to %d\n",
1505                                  disk_conf->rs_discard_granularity);
1506        }
1507}
1508
1509int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1510{
1511        struct drbd_config_context adm_ctx;
1512        enum drbd_ret_code retcode;
1513        struct drbd_device *device;
1514        struct disk_conf *new_disk_conf, *old_disk_conf;
1515        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1516        int err, fifo_size;
1517
1518        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1519        if (!adm_ctx.reply_skb)
1520                return retcode;
1521        if (retcode != NO_ERROR)
1522                goto finish;
1523
1524        device = adm_ctx.device;
1525        mutex_lock(&adm_ctx.resource->adm_mutex);
1526
1527        /* we also need a disk
1528         * to change the options on */
1529        if (!get_ldev(device)) {
1530                retcode = ERR_NO_DISK;
1531                goto out;
1532        }
1533
1534        new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1535        if (!new_disk_conf) {
1536                retcode = ERR_NOMEM;
1537                goto fail;
1538        }
1539
1540        mutex_lock(&device->resource->conf_update);
1541        old_disk_conf = device->ldev->disk_conf;
1542        *new_disk_conf = *old_disk_conf;
1543        if (should_set_defaults(info))
1544                set_disk_conf_defaults(new_disk_conf);
1545
1546        err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1547        if (err && err != -ENOMSG) {
1548                retcode = ERR_MANDATORY_TAG;
1549                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1550                goto fail_unlock;
1551        }
1552
1553        if (!expect(new_disk_conf->resync_rate >= 1))
1554                new_disk_conf->resync_rate = 1;
1555
1556        sanitize_disk_conf(device, new_disk_conf, device->ldev);
1557
1558        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1559                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1560
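            /* The resync controller's fifo size depends on c_plan_ahead;
             * reallocate the plan if the required size changed. */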
1561        fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1562        if (fifo_size != device->rs_plan_s->size) {
1563                new_plan = fifo_alloc(fifo_size);
1564                if (!new_plan) {
1565                        drbd_err(device, "kmalloc of fifo_buffer failed\n");
1566                        retcode = ERR_NOMEM;
1567                        goto fail_unlock;
1568                }
1569        }
1570
1571        drbd_suspend_io(device);
1572        wait_event(device->al_wait, lc_try_lock(device->act_log));
1573        drbd_al_shrink(device);
1574        err = drbd_check_al_size(device, new_disk_conf);
1575        lc_unlock(device->act_log);
1576        wake_up(&device->al_wait);
1577        drbd_resume_io(device);
1578
1579        if (err) {
1580                retcode = ERR_NOMEM;
1581                goto fail_unlock;
1582        }
1583
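            /* resync-after changes the global resync dependency chain,
             * so validate and apply it with all resources locked. */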
1584        lock_all_resources();
1585        retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1586        if (retcode == NO_ERROR) {
1587                rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1588                drbd_resync_after_changed(device);
1589        }
1590        unlock_all_resources();
1591
1592        if (retcode != NO_ERROR)
1593                goto fail_unlock;
1594
1595        if (new_plan) {
1596                old_plan = device->rs_plan_s;
1597                rcu_assign_pointer(device->rs_plan_s, new_plan);
1598        }
1599
1600        mutex_unlock(&device->resource->conf_update);
1601
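            /* Reflect the new al-updates and md-flushes settings in the
             * meta data and device flags. */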
1602        if (new_disk_conf->al_updates)
1603                device->ldev->md.flags &= ~MDF_AL_DISABLED;
1604        else
1605                device->ldev->md.flags |= MDF_AL_DISABLED;
1606
1607        if (new_disk_conf->md_flushes)
1608                clear_bit(MD_NO_FUA, &device->flags);
1609        else
1610                set_bit(MD_NO_FUA, &device->flags);
1611
1612        if (write_ordering_changed(old_disk_conf, new_disk_conf))
1613                drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
1614
1615        if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
1616                drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1617
1618        drbd_md_sync(device);
1619
1620        if (device->state.conn >= C_CONNECTED) {
1621                struct drbd_peer_device *peer_device;
1622
1623                for_each_peer_device(peer_device, device)
1624                        drbd_send_sync_param(peer_device);
1625        }
1626
1627        synchronize_rcu();
1628        kfree(old_disk_conf);
1629        kfree(old_plan);
1630        mod_timer(&device->request_timer, jiffies + HZ);
1631        goto success;
1632
1633fail_unlock:
1634        mutex_unlock(&device->resource->conf_update);
1635 fail:
1636        kfree(new_disk_conf);
1637        kfree(new_plan);
1638success:
1639        put_ldev(device);
1640 out:
1641        mutex_unlock(&adm_ctx.resource->adm_mutex);
1642 finish:
1643        drbd_adm_finish(&adm_ctx, info, retcode);
1644        return 0;
1645}
1646
1647static struct block_device *open_backing_dev(struct drbd_device *device,
1648                const char *bdev_path, void *claim_ptr, bool do_bd_link)
1649{
1650        struct block_device *bdev;
1651        int err = 0;
1652
1653        bdev = blkdev_get_by_path(bdev_path,
1654                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
1655        if (IS_ERR(bdev)) {
1656                drbd_err(device, "open(\"%s\") failed with %ld\n",
1657                                bdev_path, PTR_ERR(bdev));
1658                return bdev;
1659        }
1660
1661        if (!do_bd_link)
1662                return bdev;
1663
1664        err = bd_link_disk_holder(bdev, device->vdisk);
1665        if (err) {
1666                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1667                drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1668                                bdev_path, err);
1669                bdev = ERR_PTR(err);
1670        }
1671        return bdev;
1672}
1673
1674static int open_backing_devices(struct drbd_device *device,
1675                struct disk_conf *new_disk_conf,
1676                struct drbd_backing_dev *nbc)
1677{
1678        struct block_device *bdev;
1679
1680        bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
1681        if (IS_ERR(bdev))
1682                return ERR_OPEN_DISK;
1683        nbc->backing_bdev = bdev;
1684
1685        /*
1686         * meta_dev_idx >= 0: external fixed size, possibly multiple
1687         * drbd minors sharing one meta device.  TODO in that case, paranoia
1688         * check that [md_bdev, meta_dev_idx] is not yet used by some
1689         * other drbd minor!  (if you use drbd.conf + drbdadm, that
1690         * should check it for you already; but if you don't, or
1691         * someone fooled it, we need to double check here)
1692         */
1693        bdev = open_backing_dev(device, new_disk_conf->meta_dev,
1694                /* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1695                 * if potentially shared with other drbd minors */
1696                        (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1697                /* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1698                 * as would happen with internal metadata. */
1699                        (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1700                         new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
1701        if (IS_ERR(bdev))
1702                return ERR_OPEN_MD_DISK;
1703        nbc->md_bdev = bdev;
1704        return NO_ERROR;
1705}
1706
1707static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
1708        bool do_bd_unlink)
1709{
1710        if (!bdev)
1711                return;
1712        if (do_bd_unlink)
1713                bd_unlink_disk_holder(bdev, device->vdisk);
1714        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1715}
1716
1717void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1718{
1719        if (ldev == NULL)
1720                return;
1721
1722        close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
1723        close_backing_dev(device, ldev->backing_bdev, true);
1724
1725        kfree(ldev->disk_conf);
1726        kfree(ldev);
1727}
1728
1729int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1730{
1731        struct drbd_config_context adm_ctx;
1732        struct drbd_device *device;
1733        struct drbd_peer_device *peer_device;
1734        struct drbd_connection *connection;
1735        int err;
1736        enum drbd_ret_code retcode;
1737        enum determine_dev_size dd;
1738        sector_t max_possible_sectors;
1739        sector_t min_md_device_sectors;
1740        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1741        struct disk_conf *new_disk_conf = NULL;
1742        struct lru_cache *resync_lru = NULL;
1743        struct fifo_buffer *new_plan = NULL;
1744        union drbd_state ns, os;
1745        enum drbd_state_rv rv;
1746        struct net_conf *nc;
1747
1748        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1749        if (!adm_ctx.reply_skb)
1750                return retcode;
1751        if (retcode != NO_ERROR)
1752                goto finish;
1753
1754        device = adm_ctx.device;
1755        mutex_lock(&adm_ctx.resource->adm_mutex);
1756        peer_device = first_peer_device(device);
1757        connection = peer_device->connection;
1758        conn_reconfig_start(connection);
1759
1760        /* if you want to reconfigure, please tear down first */
1761        if (device->state.disk > D_DISKLESS) {
1762                retcode = ERR_DISK_CONFIGURED;
1763                goto fail;
1764        }
1765        /* It may just now have detached because of IO error.  Make sure
1766         * drbd_ldev_destroy is done already; we may end up here very fast,
1767         * e.g. if someone calls attach from the on-io-error handler,
1768         * to realize a "hot spare" feature (not that I'd recommend that) */
1769        wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1770
1771        /* make sure there is no leftover from previous force-detach attempts */
1772        clear_bit(FORCE_DETACH, &device->flags);
1773        clear_bit(WAS_IO_ERROR, &device->flags);
1774        clear_bit(WAS_READ_ERROR, &device->flags);
1775
1776        /* and no leftover from previously aborted resync or verify, either */
1777        device->rs_total = 0;
1778        device->rs_failed = 0;
1779        atomic_set(&device->rs_pending_cnt, 0);
1780
1781        /* allocation not in the IO path, drbdsetup context */
1782        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1783        if (!nbc) {
1784                retcode = ERR_NOMEM;
1785                goto fail;
1786        }
1787        spin_lock_init(&nbc->md.uuid_lock);
1788
1789        new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1790        if (!new_disk_conf) {
1791                retcode = ERR_NOMEM;
1792                goto fail;
1793        }
1794        nbc->disk_conf = new_disk_conf;
1795
1796        set_disk_conf_defaults(new_disk_conf);
1797        err = disk_conf_from_attrs(new_disk_conf, info);
1798        if (err) {
1799                retcode = ERR_MANDATORY_TAG;
1800                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1801                goto fail;
1802        }
1803
1804        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1805                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1806
1807        new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1808        if (!new_plan) {
1809                retcode = ERR_NOMEM;
1810                goto fail;
1811        }
1812
1813        if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1814                retcode = ERR_MD_IDX_INVALID;
1815                goto fail;
1816        }
1817
1818        rcu_read_lock();
1819        nc = rcu_dereference(connection->net_conf);
1820        if (nc) {
1821                if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1822                        rcu_read_unlock();
1823                        retcode = ERR_STONITH_AND_PROT_A;
1824                        goto fail;
1825                }
1826        }
1827        rcu_read_unlock();
1828
1829        retcode = open_backing_devices(device, new_disk_conf, nbc);
1830        if (retcode != NO_ERROR)
1831                goto fail;
1832
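            /* Internal (or flexible internal) meta data must live on the backing
             * device itself; any other meta_dev_idx requires a separate meta device. */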
1833        if ((nbc->backing_bdev == nbc->md_bdev) !=
1834            (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1835             new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1836                retcode = ERR_MD_IDX_INVALID;
1837                goto fail;
1838        }
1839
1840        resync_lru = lc_create("resync", drbd_bm_ext_cache,
1841                        1, 61, sizeof(struct bm_extent),
1842                        offsetof(struct bm_extent, lce));
1843        if (!resync_lru) {
1844                retcode = ERR_NOMEM;
1845                goto fail;
1846        }
1847
1848        /* Read our meta data super block early.
1849         * This also sets other on-disk offsets. */
1850        retcode = drbd_md_read(device, nbc);
1851        if (retcode != NO_ERROR)
1852                goto fail;
1853
1854        sanitize_disk_conf(device, new_disk_conf, nbc);
1855
1856        if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1857                drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1858                        (unsigned long long) drbd_get_max_capacity(nbc),
1859                        (unsigned long long) new_disk_conf->disk_size);
1860                retcode = ERR_DISK_TOO_SMALL;
1861                goto fail;
1862        }
1863
1864        if (new_disk_conf->meta_dev_idx < 0) {
1865                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1866                /* at least one MB, otherwise it does not make sense */
1867                min_md_device_sectors = (2<<10);
1868        } else {
1869                max_possible_sectors = DRBD_MAX_SECTORS;
1870                min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1871        }
1872
1873        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1874                retcode = ERR_MD_DISK_TOO_SMALL;
1875                drbd_warn(device, "refusing attach: md-device too small, "
1876                     "at least %llu sectors needed for this meta-disk type\n",
1877                     (unsigned long long) min_md_device_sectors);
1878                goto fail;
1879        }
1880
1881        /* Make sure the new disk is big enough
1882         * (we may currently be R_PRIMARY with no local disk...) */
1883        if (drbd_get_max_capacity(nbc) <
1884            drbd_get_capacity(device->this_bdev)) {
1885                retcode = ERR_DISK_TOO_SMALL;
1886                goto fail;
1887        }
1888
1889        nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1890
1891        if (nbc->known_size > max_possible_sectors) {
1892                drbd_warn(device, "==> truncating very big lower level device "
1893                        "to currently maximum possible %llu sectors <==\n",
1894                        (unsigned long long) max_possible_sectors);
1895                if (new_disk_conf->meta_dev_idx >= 0)
1896                        drbd_warn(device, "==>> using internal or flexible "
1897                                      "meta data may help <<==\n");
1898        }
1899
1900        drbd_suspend_io(device);
1901        /* also wait for the last barrier ack. */
1902        /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1903         * We need a way to either ignore barrier acks for barriers sent before a device
1904         * was attached, or a way to wait for all pending barrier acks to come in.
1905         * As barriers are counted per resource,
1906         * we'd need to suspend io on all devices of a resource.
1907         */
1908        wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1909        /* and for any other previously queued work */
1910        drbd_flush_workqueue(&connection->sender_work);
1911
1912        rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1913        retcode = rv;  /* FIXME: Type mismatch. */
1914        drbd_resume_io(device);
1915        if (rv < SS_SUCCESS)
1916                goto fail;
1917
1918        if (!get_ldev_if_state(device, D_ATTACHING))
1919                goto force_diskless;
1920
1921        if (!device->bitmap) {
1922                if (drbd_bm_init(device)) {
1923                        retcode = ERR_NOMEM;
1924                        goto force_diskless_dec;
1925                }
1926        }
1927
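            /* A Primary that lost its disk may only re-attach to data carrying
             * the current UUID it still exposes (ed_uuid); bit 0, which only
             * encodes the Primary role, is ignored in the comparison. */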
1928        if (device->state.conn < C_CONNECTED &&
1929            device->state.role == R_PRIMARY && device->ed_uuid &&
1930            (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1931                drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1932                    (unsigned long long)device->ed_uuid);
1933                retcode = ERR_DATA_NOT_CURRENT;
1934                goto force_diskless_dec;
1935        }
1936
1937        /* Since we are diskless, fix the activity log first... */
1938        if (drbd_check_al_size(device, new_disk_conf)) {
1939                retcode = ERR_NOMEM;
1940                goto force_diskless_dec;
1941        }
1942
1943        /* Prevent shrinking of consistent devices! */
1944        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1945            drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1946                drbd_warn(device, "refusing to truncate a consistent device\n");
1947                retcode = ERR_DISK_TOO_SMALL;
1948                goto force_diskless_dec;
1949        }
1950
1951        lock_all_resources();
1952        retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1953        if (retcode != NO_ERROR) {
1954                unlock_all_resources();
1955                goto force_diskless_dec;
1956        }
1957
1958        /* Reset the "barriers don't work" bits here, then force meta data to
1959         * be written, to ensure we determine if barriers are supported. */
1960        if (new_disk_conf->md_flushes)
1961                clear_bit(MD_NO_FUA, &device->flags);
1962        else
1963                set_bit(MD_NO_FUA, &device->flags);
1964
1965        /* Point of no return reached.
1966         * Devices and memory are no longer released by error cleanup below.
1967         * Now the device takes over responsibility, and the state engine should
1968         * clean it up somewhere.  */
1969        D_ASSERT(device, device->ldev == NULL);
1970        device->ldev = nbc;
1971        device->resync = resync_lru;
1972        device->rs_plan_s = new_plan;
1973        nbc = NULL;
1974        resync_lru = NULL;
1975        new_disk_conf = NULL;
1976        new_plan = NULL;
1977
1978        drbd_resync_after_changed(device);
1979        drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
1980        unlock_all_resources();
1981
1982        if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1983                set_bit(CRASHED_PRIMARY, &device->flags);
1984        else
1985                clear_bit(CRASHED_PRIMARY, &device->flags);
1986
1987        if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1988            !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1989                set_bit(CRASHED_PRIMARY, &device->flags);
1990
1991        device->send_cnt = 0;
1992        device->recv_cnt = 0;
1993        device->read_cnt = 0;
1994        device->writ_cnt = 0;
1995
1996        drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1997
1998        /* If I am currently not R_PRIMARY,
1999         * but meta data primary indicator is set,
2000         * I am just now recovering from a hard crash,
2001         * and I was R_PRIMARY before that crash.
2002         *
2003         * Now, if I had no connection before that crash
2004         * (have been degraded R_PRIMARY), chances are that
2005         * I won't find my peer now either.
2006         *
2007         * In that case, and _only_ in that case,
2008         * we use the degr-wfc-timeout instead of the default,
2009         * so we can automatically recover from a crash of a
2010         * degraded but active "cluster" after a certain timeout.
2011         */
2012        clear_bit(USE_DEGR_WFC_T, &device->flags);
2013        if (device->state.role != R_PRIMARY &&
2014             drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2015            !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2016                set_bit(USE_DEGR_WFC_T, &device->flags);
2017
2018        dd = drbd_determine_dev_size(device, 0, NULL);
2019        if (dd <= DS_ERROR) {
2020                retcode = ERR_NOMEM_BITMAP;
2021                goto force_diskless_dec;
2022        } else if (dd == DS_GREW)
2023                set_bit(RESYNC_AFTER_NEG, &device->flags);
2024
2025        if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2026            (test_bit(CRASHED_PRIMARY, &device->flags) &&
2027             drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2028                drbd_info(device, "Assuming that all blocks are out of sync "
2029                     "(aka FullSync)\n");
2030                if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2031                        "set_n_write from attaching", BM_LOCKED_MASK)) {
2032                        retcode = ERR_IO_MD_DISK;
2033                        goto force_diskless_dec;
2034                }
2035        } else {
2036                if (drbd_bitmap_io(device, &drbd_bm_read,
2037                        "read from attaching", BM_LOCKED_MASK)) {
2038                        retcode = ERR_IO_MD_DISK;
2039                        goto force_diskless_dec;
2040                }
2041        }
2042
2043        if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2044                drbd_suspend_al(device); /* IO is still suspended here... */
2045
2046        spin_lock_irq(&device->resource->req_lock);
2047        os = drbd_read_state(device);
2048        ns = os;
2049        /* If MDF_CONSISTENT is not set go into inconsistent state,
2050           otherwise investigate MDF_WAS_UP_TO_DATE...
2051           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2052           otherwise into D_CONSISTENT state.
2053        */
2054        if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2055                if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2056                        ns.disk = D_CONSISTENT;
2057                else
2058                        ns.disk = D_OUTDATED;
2059        } else {
2060                ns.disk = D_INCONSISTENT;
2061        }
2062
2063        if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2064                ns.pdsk = D_OUTDATED;
2065
2066        rcu_read_lock();
2067        if (ns.disk == D_CONSISTENT &&
2068            (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2069                ns.disk = D_UP_TO_DATE;
2070
2071        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2072           MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2073           this point, because drbd_request_state() modifies these
2074           flags. */
2075
2076        if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2077                device->ldev->md.flags &= ~MDF_AL_DISABLED;
2078        else
2079                device->ldev->md.flags |= MDF_AL_DISABLED;
2080
2081        rcu_read_unlock();
2082
2083        /* In case we are C_CONNECTED postpone any decision on the new disk
2084           state after the negotiation phase. */
2085        if (device->state.conn == C_CONNECTED) {
2086                device->new_state_tmp.i = ns.i;
2087                ns.i = os.i;
2088                ns.disk = D_NEGOTIATING;
2089
2090                /* We expect to receive up-to-date UUIDs soon.
2091                   To avoid a race in receive_state, free p_uuid while
2092                   holding req_lock. I.e. atomic with the state change */
2093                kfree(device->p_uuid);
2094                device->p_uuid = NULL;
2095        }
2096
2097        rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2098        spin_unlock_irq(&device->resource->req_lock);
2099
2100        if (rv < SS_SUCCESS)
2101                goto force_diskless_dec;
2102
2103        mod_timer(&device->request_timer, jiffies + HZ);
2104
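            /* Bit 0 of the current UUID records whether this node is Primary. */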
2105        if (device->state.role == R_PRIMARY)
2106                device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
2107        else
2108                device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2109
2110        drbd_md_mark_dirty(device);
2111        drbd_md_sync(device);
2112
2113        kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2114        put_ldev(device);
2115        conn_reconfig_done(connection);
2116        mutex_unlock(&adm_ctx.resource->adm_mutex);
2117        drbd_adm_finish(&adm_ctx, info, retcode);
2118        return 0;
2119
2120 force_diskless_dec:
2121        put_ldev(device);
2122 force_diskless:
2123        drbd_force_state(device, NS(disk, D_DISKLESS));
2124        drbd_md_sync(device);
2125 fail:
2126        conn_reconfig_done(connection);
2127        if (nbc) {
2128                close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2129                close_backing_dev(device, nbc->backing_bdev, true);
2130                kfree(nbc);
2131        }
2132        kfree(new_disk_conf);
2133        lc_destroy(resync_lru);
2134        kfree(new_plan);
2135        mutex_unlock(&adm_ctx.resource->adm_mutex);
2136 finish:
2137        drbd_adm_finish(&adm_ctx, info, retcode);
2138        return 0;
2139}
2140
2141static int adm_detach(struct drbd_device *device, int force)
2142{
2143        enum drbd_state_rv retcode;
2144        void *buffer;
2145        int ret;
2146
2147        if (force) {
2148                set_bit(FORCE_DETACH, &device->flags);
2149                drbd_force_state(device, NS(disk, D_FAILED));
2150                retcode = SS_SUCCESS;
2151                goto out;
2152        }
2153
2154        drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
2155        buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
2156        if (buffer) {
2157                retcode = drbd_request_state(device, NS(disk, D_FAILED));
2158                drbd_md_put_buffer(device);
2159        } else /* already <= D_FAILED */
2160                retcode = SS_NOTHING_TO_DO;
2161        /* D_FAILED will transition to DISKLESS. */
2162        drbd_resume_io(device);
2163        ret = wait_event_interruptible(device->misc_wait,
2164                        device->state.disk != D_FAILED);
2165        if ((int)retcode == (int)SS_IS_DISKLESS)
2166                retcode = SS_NOTHING_TO_DO;
2167        if (ret)
2168                retcode = ERR_INTR;
2169out:
2170        return retcode;
2171}
2172
2173/* Detaching the disk is a process in multiple stages.  First we need to lock
2174 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2175 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2176 * internal references as well.
2177 * Only then have we finally detached. */
2178int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2179{
2180        struct drbd_config_context adm_ctx;
2181        enum drbd_ret_code retcode;
2182        struct detach_parms parms = { };
2183        int err;
2184
2185        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2186        if (!adm_ctx.reply_skb)
2187                return retcode;
2188        if (retcode != NO_ERROR)
2189                goto out;
2190
2191        if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2192                err = detach_parms_from_attrs(&parms, info);
2193                if (err) {
2194                        retcode = ERR_MANDATORY_TAG;
2195                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2196                        goto out;
2197                }
2198        }
2199
2200        mutex_lock(&adm_ctx.resource->adm_mutex);
2201        retcode = adm_detach(adm_ctx.device, parms.force_detach);
2202        mutex_unlock(&adm_ctx.resource->adm_mutex);
2203out:
2204        drbd_adm_finish(&adm_ctx, info, retcode);
2205        return 0;
2206}
2207
2208static bool conn_resync_running(struct drbd_connection *connection)
2209{
2210        struct drbd_peer_device *peer_device;
2211        bool rv = false;
2212        int vnr;
2213
2214        rcu_read_lock();
2215        idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2216                struct drbd_device *device = peer_device->device;
2217                if (device->state.conn == C_SYNC_SOURCE ||
2218                    device->state.conn == C_SYNC_TARGET ||
2219                    device->state.conn == C_PAUSED_SYNC_S ||
2220                    device->state.conn == C_PAUSED_SYNC_T) {
2221                        rv = true;
2222                        break;
2223                }
2224        }
2225        rcu_read_unlock();
2226
2227        return rv;
2228}
2229
2230static bool conn_ov_running(struct drbd_connection *connection)
2231{
2232        struct drbd_peer_device *peer_device;
2233        bool rv = false;
2234        int vnr;
2235
2236        rcu_read_lock();
2237        idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2238                struct drbd_device *device = peer_device->device;
2239                if (device->state.conn == C_VERIFY_S ||
2240                    device->state.conn == C_VERIFY_T) {
2241                        rv = true;
2242                        break;
2243                }
2244        }
2245        rcu_read_unlock();
2246
2247        return rv;
2248}
2249
2250static enum drbd_ret_code
2251_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2252{
2253        struct drbd_peer_device *peer_device;
2254        int i;
2255
2256        if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2257                if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2258                        return ERR_NEED_APV_100;
2259
2260                if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2261                        return ERR_NEED_APV_100;
2262
2263                if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2264                        return ERR_NEED_APV_100;
2265        }
2266
2267        if (!new_net_conf->two_primaries &&
2268            conn_highest_role(connection) == R_PRIMARY &&
2269            conn_highest_peer(connection) == R_PRIMARY)
2270                return ERR_NEED_ALLOW_TWO_PRI;
2271
2272        if (new_net_conf->two_primaries &&
2273            (new_net_conf->wire_protocol != DRBD_PROT_C))
2274                return ERR_NOT_PROTO_C;
2275
2276        idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2277                struct drbd_device *device = peer_device->device;
2278                if (get_ldev(device)) {
2279                        enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2280                        put_ldev(device);
2281                        if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2282                                return ERR_STONITH_AND_PROT_A;
2283                }
2284                if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2285                        return ERR_DISCARD_IMPOSSIBLE;
2286        }
2287
2288        if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2289                return ERR_CONG_NOT_PROTO_A;
2290
2291        return NO_ERROR;
2292}
2293
2294static enum drbd_ret_code
2295check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2296{
2297        enum drbd_ret_code rv;
2298        struct drbd_peer_device *peer_device;
2299        int i;
2300
2301        rcu_read_lock();
2302        rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2303        rcu_read_unlock();
2304
2305        /* connection->peer_devices protected by genl_lock() here */
2306        idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2307                struct drbd_device *device = peer_device->device;
2308                if (!device->bitmap) {
2309                        if (drbd_bm_init(device))
2310                                return ERR_NOMEM;
2311                }
2312        }
2313
2314        return rv;
2315}
2316
2317struct crypto {
2318        struct crypto_ahash *verify_tfm;
2319        struct crypto_ahash *csums_tfm;
2320        struct crypto_shash *cram_hmac_tfm;
2321        struct crypto_ahash *integrity_tfm;
2322};
2323
2324static int
2325alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2326{
2327        if (!tfm_name[0])
2328                return NO_ERROR;
2329
2330        *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2331        if (IS_ERR(*tfm)) {
2332                *tfm = NULL;
2333                return err_alg;
2334        }
2335
2336        return NO_ERROR;
2337}
2338
2339static int
2340alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
2341{
2342        if (!tfm_name[0])
2343                return NO_ERROR;
2344
2345        *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
2346        if (IS_ERR(*tfm)) {
2347                *tfm = NULL;
2348                return err_alg;
2349        }
2350
2351        return NO_ERROR;
2352}
2353
2354static enum drbd_ret_code
2355alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2356{
2357        char hmac_name[CRYPTO_MAX_ALG_NAME];
2358        enum drbd_ret_code rv;
2359
2360        rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
2361                         ERR_CSUMS_ALG);
2362        if (rv != NO_ERROR)
2363                return rv;
2364        rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
2365                         ERR_VERIFY_ALG);
2366        if (rv != NO_ERROR)
2367                return rv;
2368        rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2369                         ERR_INTEGRITY_ALG);
2370        if (rv != NO_ERROR)
2371                return rv;
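            /* cram_hmac_alg names a plain digest; wrap it in the crypto API's
             * hmac() template. */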
2372        if (new_net_conf->cram_hmac_alg[0] != 0) {
2373                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2374                         new_net_conf->cram_hmac_alg);
2375
2376                rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2377                                 ERR_AUTH_ALG);
2378        }
2379
2380        return rv;
2381}
2382
2383static void free_crypto(struct crypto *crypto)
2384{
2385        crypto_free_shash(crypto->cram_hmac_tfm);
2386        crypto_free_ahash(crypto->integrity_tfm);
2387        crypto_free_ahash(crypto->csums_tfm);
2388        crypto_free_ahash(crypto->verify_tfm);
2389}
2390
2391int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2392{
2393        struct drbd_config_context adm_ctx;
2394        enum drbd_ret_code retcode;
2395        struct drbd_connection *connection;
2396        struct net_conf *old_net_conf, *new_net_conf = NULL;
2397        int err;
2398        int ovr; /* online verify running */
2399        int rsr; /* re-sync running */
2400        struct crypto crypto = { };
2401
2402        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2403        if (!adm_ctx.reply_skb)
2404                return retcode;
2405        if (retcode != NO_ERROR)
2406                goto finish;
2407
2408        connection = adm_ctx.connection;
2409        mutex_lock(&adm_ctx.resource->adm_mutex);
2410
2411        new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2412        if (!new_net_conf) {
2413                retcode = ERR_NOMEM;
2414                goto out;
2415        }
2416
2417        conn_reconfig_start(connection);
2418
2419        mutex_lock(&connection->data.mutex);
2420        mutex_lock(&connection->resource->conf_update);
2421        old_net_conf = connection->net_conf;
2422
2423        if (!old_net_conf) {
2424                drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2425                retcode = ERR_INVALID_REQUEST;
2426                goto fail;
2427        }
2428
2429        *new_net_conf = *old_net_conf;
2430        if (should_set_defaults(info))
2431                set_net_conf_defaults(new_net_conf);
2432
2433        err = net_conf_from_attrs_for_change(new_net_conf, info);
2434        if (err && err != -ENOMSG) {
2435                retcode = ERR_MANDATORY_TAG;
2436                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2437                goto fail;
2438        }
2439
2440        retcode = check_net_options(connection, new_net_conf);
2441        if (retcode != NO_ERROR)
2442                goto fail;
2443
2444        /* re-sync running */
2445        rsr = conn_resync_running(connection);
2446        if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2447                retcode = ERR_CSUMS_RESYNC_RUNNING;
2448                goto fail;
2449        }
2450
2451        /* online verify running */
2452        ovr = conn_ov_running(connection);
2453        if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2454                retcode = ERR_VERIFY_RUNNING;
2455                goto fail;
2456        }
2457
2458        retcode = alloc_crypto(&crypto, new_net_conf);
2459        if (retcode != NO_ERROR)
2460                goto fail;
2461
2462        rcu_assign_pointer(connection->net_conf, new_net_conf);
2463
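            /* Install the new transforms, but leave the csums/verify transforms
             * untouched while a resync or online verify is still using them. */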
2464        if (!rsr) {
2465                crypto_free_ahash(connection->csums_tfm);
2466                connection->csums_tfm = crypto.csums_tfm;
2467                crypto.csums_tfm = NULL;
2468        }
2469        if (!ovr) {
2470                crypto_free_ahash(connection->verify_tfm);
2471                connection->verify_tfm = crypto.verify_tfm;
2472                crypto.verify_tfm = NULL;
2473        }
2474
2475        crypto_free_ahash(connection->integrity_tfm);
2476        connection->integrity_tfm = crypto.integrity_tfm;
2477        if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2478                /* Do this without trying to take connection->data.mutex again.  */
2479                __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2480
2481        crypto_free_shash(connection->cram_hmac_tfm);
2482        connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2483
2484        mutex_unlock(&connection->resource->conf_update);
2485        mutex_unlock(&connection->data.mutex);
2486        synchronize_rcu();
2487        kfree(old_net_conf);
2488
2489        if (connection->cstate >= C_WF_REPORT_PARAMS) {
2490                struct drbd_peer_device *peer_device;
2491                int vnr;
2492
2493                idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2494                        drbd_send_sync_param(peer_device);
2495        }
2496
2497        goto done;
2498
2499 fail:
2500        mutex_unlock(&connection->resource->conf_update);
2501        mutex_unlock(&connection->data.mutex);
2502        free_crypto(&crypto);
2503        kfree(new_net_conf);
2504 done:
2505        conn_reconfig_done(connection);
2506 out:
2507        mutex_unlock(&adm_ctx.resource->adm_mutex);
2508 finish:
2509        drbd_adm_finish(&adm_ctx, info, retcode);
2510        return 0;
2511}
2512
2513static void connection_to_info(struct connection_info *info,
2514                               struct drbd_connection *connection)
2515{
2516        info->conn_connection_state = connection->cstate;
2517        info->conn_role = conn_highest_peer(connection);
2518}
2519
2520static void peer_device_to_info(struct peer_device_info *info,
2521                                struct drbd_peer_device *peer_device)
2522{
2523        struct drbd_device *device = peer_device->device;
2524
2525        info->peer_repl_state =
2526                max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2527        info->peer_disk_state = device->state.pdsk;
2528        info->peer_resync_susp_user = device->state.user_isp;
2529        info->peer_resync_susp_peer = device->state.peer_isp;
2530        info->peer_resync_susp_dependency = device->state.aftr_isp;
2531}
2532
2533int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2534{
2535        struct connection_info connection_info;
2536        enum drbd_notification_type flags;
2537        unsigned int peer_devices = 0;
2538        struct drbd_config_context adm_ctx;
2539        struct drbd_peer_device *peer_device;
2540        struct net_conf *old_net_conf, *new_net_conf = NULL;
2541        struct crypto crypto = { };
2542        struct drbd_resource *resource;
2543        struct drbd_connection *connection;
2544        enum drbd_ret_code retcode;
2545        int i;
2546        int err;
2547
2548        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2549
2550        if (!adm_ctx.reply_skb)
2551                return retcode;
2552        if (retcode != NO_ERROR)
2553                goto out;
2554        if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2555                drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2556                retcode = ERR_INVALID_REQUEST;
2557                goto out;
2558        }
2559
2560        /* No need for _rcu here. All reconfiguration is
2561         * strictly serialized on genl_lock(). We are protected against
2562         * concurrent reconfiguration/addition/deletion */
2563        for_each_resource(resource, &drbd_resources) {
2564                for_each_connection(connection, resource) {
2565                        if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2566                            !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2567                                    connection->my_addr_len)) {
2568                                retcode = ERR_LOCAL_ADDR;
2569                                goto out;
2570                        }
2571
2572                        if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2573                            !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2574                                    connection->peer_addr_len)) {
2575                                retcode = ERR_PEER_ADDR;
2576                                goto out;
2577                        }
2578                }
2579        }
2580
2581        mutex_lock(&adm_ctx.resource->adm_mutex);
2582        connection = first_connection(adm_ctx.resource);
2583        conn_reconfig_start(connection);
2584
2585        if (connection->cstate > C_STANDALONE) {
2586                retcode = ERR_NET_CONFIGURED;
2587                goto fail;
2588        }
2589
2590        /* allocation not in the IO path, drbdsetup / netlink process context */
2591        new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2592        if (!new_net_conf) {
2593                retcode = ERR_NOMEM;
2594                goto fail;
2595        }
2596
2597        set_net_conf_defaults(new_net_conf);
2598
2599        err = net_conf_from_attrs(new_net_conf, info);
2600        if (err && err != -ENOMSG) {
2601                retcode = ERR_MANDATORY_TAG;
2602                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2603                goto fail;
2604        }
2605
2606        retcode = check_net_options(connection, new_net_conf);
2607        if (retcode != NO_ERROR)
2608                goto fail;
2609
2610        retcode = alloc_crypto(&crypto, new_net_conf);
2611        if (retcode != NO_ERROR)
2612                goto fail;
2613
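            /* make sure the shared secret is NUL terminated */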
2614        ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2615
2616        drbd_flush_workqueue(&connection->sender_work);
2617
2618        mutex_lock(&adm_ctx.resource->conf_update);
2619        old_net_conf = connection->net_conf;
2620        if (old_net_conf) {
2621                retcode = ERR_NET_CONFIGURED;
2622                mutex_unlock(&adm_ctx.resource->conf_update);
2623                goto fail;
2624        }
2625        rcu_assign_pointer(connection->net_conf, new_net_conf);
2626
2627        conn_free_crypto(connection);
2628        connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2629        connection->integrity_tfm = crypto.integrity_tfm;
2630        connection->csums_tfm = crypto.csums_tfm;
2631        connection->verify_tfm = crypto.verify_tfm;
2632
2633        connection->my_addr_len = nla_len(adm_ctx.my_addr);
2634        memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2635        connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2636        memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2637
2638        idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2639                peer_devices++;
2640        }
2641
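            /* Broadcast NOTIFY_CREATE for the connection and each peer device;
             * all but the last event in the series carry NOTIFY_CONTINUES. */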
2642        connection_to_info(&connection_info, connection);
2643        flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2644        mutex_lock(&notification_mutex);
2645        notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2646        idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2647                struct peer_device_info peer_device_info;
2648
2649                peer_device_to_info(&peer_device_info, peer_device);
2650                flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2651                notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2652        }
2653        mutex_unlock(&notification_mutex);
2654        mutex_unlock(&adm_ctx.resource->conf_update);
2655
2656        rcu_read_lock();
2657        idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2658                struct drbd_device *device = peer_device->device;
2659                device->send_cnt = 0;
2660                device->recv_cnt = 0;
2661        }
2662        rcu_read_unlock();
2663
2664        retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2665
2666        conn_reconfig_done(connection);
2667        mutex_unlock(&adm_ctx.resource->adm_mutex);
2668        drbd_adm_finish(&adm_ctx, info, retcode);
2669        return 0;
2670
2671fail:
2672        free_crypto(&crypto);
2673        kfree(new_net_conf);
2674
2675        conn_reconfig_done(connection);
2676        mutex_unlock(&adm_ctx.resource->adm_mutex);
2677out:
2678        drbd_adm_finish(&adm_ctx, info, retcode);
2679        return 0;
2680}
2681
2682static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2683{
2684        enum drbd_state_rv rv;
2685
2686        rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2687                        force ? CS_HARD : 0);
2688
2689        switch (rv) {
2690        case SS_NOTHING_TO_DO:
2691                break;
2692        case SS_ALREADY_STANDALONE:
2693                return SS_SUCCESS;
2694        case SS_PRIMARY_NOP:
2695                /* Our state checking code wants to see the peer outdated. */
2696                rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2697
2698                if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2699                        rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2700
2701                break;
2702        case SS_CW_FAILED_BY_PEER:
2703                /* The peer probably wants to see us outdated. */
2704                rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2705                                                        disk, D_OUTDATED), 0);
2706                if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2707                        rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2708                                        CS_HARD);
2709                }
2710                break;
2711        default:;
2712                /* no special handling necessary */
2713        }
2714
2715        if (rv >= SS_SUCCESS) {
2716                enum drbd_state_rv rv2;
2717                /* No one else can reconfigure the network while I am here.
2718                 * The state handling only uses drbd_thread_stop_nowait(),
2719                 * we want to really wait here until the receiver is no more.
2720                 */
2721                drbd_thread_stop(&connection->receiver);
2722
2723                /* Race breaker.  This additional state change request may be
2724                 * necessary if this was a forced disconnect during a receiver
2725                 * restart.  We may have "killed" the receiver thread just
2726                 * after drbd_receiver() returned.  Typically, we should be
2727                 * C_STANDALONE already, now, and this becomes a no-op.
2728                 */
2729                rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2730                                CS_VERBOSE | CS_HARD);
2731                if (rv2 < SS_SUCCESS)
2732                        drbd_err(connection,
2733                                "unexpected rv2=%d in conn_try_disconnect()\n",
2734                                rv2);
2735                /* Unlike in DRBD 9, the state engine has generated
2736                 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2737        }
2738        return rv;
2739}
2740
2741int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2742{
2743        struct drbd_config_context adm_ctx;
2744        struct disconnect_parms parms;
2745        struct drbd_connection *connection;
2746        enum drbd_state_rv rv;
2747        enum drbd_ret_code retcode;
2748        int err;
2749
2750        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2751        if (!adm_ctx.reply_skb)
2752                return retcode;
2753        if (retcode != NO_ERROR)
2754                goto fail;
2755
2756        connection = adm_ctx.connection;
2757        memset(&parms, 0, sizeof(parms));
2758        if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2759                err = disconnect_parms_from_attrs(&parms, info);
2760                if (err) {
2761                        retcode = ERR_MANDATORY_TAG;
2762                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2763                        goto fail;
2764                }
2765        }
2766
2767        mutex_lock(&adm_ctx.resource->adm_mutex);
2768        rv = conn_try_disconnect(connection, parms.force_disconnect);
2769        if (rv < SS_SUCCESS)
2770                retcode = rv;  /* FIXME: Type mismatch. */
2771        else
2772                retcode = NO_ERROR;
2773        mutex_unlock(&adm_ctx.resource->adm_mutex);
2774 fail:
2775        drbd_adm_finish(&adm_ctx, info, retcode);
2776        return 0;
2777}
2778
2779void resync_after_online_grow(struct drbd_device *device)
2780{
2781        int iass; /* I am sync source */
2782
2783        drbd_info(device, "Resync of new storage after online grow\n");
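            /* With different roles, the Primary becomes sync source; with equal
             * roles, fall back to the connection's RESOLVE_CONFLICTS flag. */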
2784        if (device->state.role != device->state.peer)
2785                iass = (device->state.role == R_PRIMARY);
2786        else
2787                iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2788
2789        if (iass)
2790                drbd_start_resync(device, C_SYNC_SOURCE);
2791        else
2792                _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2793}
2794
2795int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2796{
2797        struct drbd_config_context adm_ctx;
2798        struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2799        struct resize_parms rs;
2800        struct drbd_device *device;
2801        enum drbd_ret_code retcode;
2802        enum determine_dev_size dd;
2803        bool change_al_layout = false;
2804        enum dds_flags ddsf;
2805        sector_t u_size;
2806        int err;
2807
2808        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2809        if (!adm_ctx.reply_skb)
2810                return retcode;
2811        if (retcode != NO_ERROR)
2812                goto finish;
2813
2814        mutex_lock(&adm_ctx.resource->adm_mutex);
2815        device = adm_ctx.device;
2816        if (!get_ldev(device)) {
2817                retcode = ERR_NO_DISK;
2818                goto fail;
2819        }
2820
2821        memset(&rs, 0, sizeof(struct resize_parms));
2822        rs.al_stripes = device->ldev->md.al_stripes;
2823        rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2824        if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2825                err = resize_parms_from_attrs(&rs, info);
2826                if (err) {
2827                        retcode = ERR_MANDATORY_TAG;
2828                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2829                        goto fail_ldev;
2830                }
2831        }
2832
2833        if (device->state.conn > C_CONNECTED) {
2834                retcode = ERR_RESIZE_RESYNC;
2835                goto fail_ldev;
2836        }
2837
2838        if (device->state.role == R_SECONDARY &&
2839            device->state.peer == R_SECONDARY) {
2840                retcode = ERR_NO_PRIMARY;
2841                goto fail_ldev;
2842        }
2843
2844        if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2845                retcode = ERR_NEED_APV_93;
2846                goto fail_ldev;
2847        }
2848
2849        rcu_read_lock();
2850        u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2851        rcu_read_unlock();
2852        if (u_size != (sector_t)rs.resize_size) {
2853                new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2854                if (!new_disk_conf) {
2855                        retcode = ERR_NOMEM;
2856                        goto fail_ldev;
2857                }
2858        }
2859
2860        if (device->ldev->md.al_stripes != rs.al_stripes ||
2861            device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2862                u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2863
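                    /* al_size_k is in KiB: reject layouts larger than 16 GiB
                     * or smaller than 32 KiB. */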
2864                if (al_size_k > (16 * 1024 * 1024)) {
2865                        retcode = ERR_MD_LAYOUT_TOO_BIG;
2866                        goto fail_ldev;
2867                }
2868
2869                if (al_size_k < MD_32kB_SECT/2) {
2870                        retcode = ERR_MD_LAYOUT_TOO_SMALL;
2871                        goto fail_ldev;
2872                }
2873
2874                if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2875                        retcode = ERR_MD_LAYOUT_CONNECTED;
2876                        goto fail_ldev;
2877                }
2878
2879                change_al_layout = true;
2880        }
2881
2882        if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2883                device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2884
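            /* Publish the updated disk_conf via RCU and free the old one only
             * after all readers are done with it. */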
2885        if (new_disk_conf) {
2886                mutex_lock(&device->resource->conf_update);
2887                old_disk_conf = device->ldev->disk_conf;
2888                *new_disk_conf = *old_disk_conf;
2889                new_disk_conf->disk_size = (sector_t)rs.resize_size;
2890                rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2891                mutex_unlock(&device->resource->conf_update);
2892                synchronize_rcu();
2893                kfree(old_disk_conf);
2894                new_disk_conf = NULL;
2895        }
2896
2897        ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2898        dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2899        drbd_md_sync(device);
2900        put_ldev(device);
2901        if (dd == DS_ERROR) {
2902                retcode = ERR_NOMEM_BITMAP;
2903                goto fail;
2904        } else if (dd == DS_ERROR_SPACE_MD) {
2905                retcode = ERR_MD_LAYOUT_NO_FIT;
2906                goto fail;
2907        } else if (dd == DS_ERROR_SHRINK) {
2908                retcode = ERR_IMPLICIT_SHRINK;
2909                goto fail;
2910        }
2911
2912        if (device->state.conn == C_CONNECTED) {
2913                if (dd == DS_GREW)
2914                        set_bit(RESIZE_PENDING, &device->flags);
2915
2916                drbd_send_uuids(first_peer_device(device));
2917                drbd_send_sizes(first_peer_device(device), 1, ddsf);
2918        }
2919
2920 fail:
2921        mutex_unlock(&adm_ctx.resource->adm_mutex);
2922 finish:
2923        drbd_adm_finish(&adm_ctx, info, retcode);
2924        return 0;
2925
2926 fail_ldev:
2927        put_ldev(device);
2928        kfree(new_disk_conf);
2929        goto fail;
2930}
2931
2932int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2933{
2934        struct drbd_config_context adm_ctx;
2935        enum drbd_ret_code retcode;
2936        struct res_opts res_opts;
2937        int err;
2938
2939        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2940        if (!adm_ctx.reply_skb)
2941                return retcode;
2942        if (retcode != NO_ERROR)
2943                goto fail;
2944
2945        res_opts = adm_ctx.resource->res_opts;
2946        if (should_set_defaults(info))
2947                set_res_opts_defaults(&res_opts);
2948
2949        err = res_opts_from_attrs(&res_opts, info);
2950        if (err && err != -ENOMSG) {
2951                retcode = ERR_MANDATORY_TAG;
2952                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2953                goto fail;
2954        }
2955
2956        mutex_lock(&adm_ctx.resource->adm_mutex);
2957        err = set_resource_options(adm_ctx.resource, &res_opts);
2958        if (err) {
2959                retcode = ERR_INVALID_REQUEST;
2960                if (err == -ENOMEM)
2961                        retcode = ERR_NOMEM;
2962        }
2963        mutex_unlock(&adm_ctx.resource->adm_mutex);
2964
2965fail:
2966        drbd_adm_finish(&adm_ctx, info, retcode);
2967        return 0;
2968}
2969
2970int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2971{
2972        struct drbd_config_context adm_ctx;
2973        struct drbd_device *device;
2974        int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2975
2976        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2977        if (!adm_ctx.reply_skb)
2978                return retcode;
2979        if (retcode != NO_ERROR)
2980                goto out;
2981
2982        device = adm_ctx.device;
2983        if (!get_ldev(device)) {
2984                retcode = ERR_NO_DISK;
2985                goto out;
2986        }
2987
2988        mutex_lock(&adm_ctx.resource->adm_mutex);
2989
2990        /* If there is still bitmap IO pending, probably because of a previous
2991         * resync just being finished, wait for it before requesting a new resync.
2992         * Also wait for its after_state_ch(). */
2993        drbd_suspend_io(device);
2994        wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2995        drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2996
2997        /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2998         * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
2999         * try to start a resync handshake as sync target for full sync.
3000         */
3001        if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
3002                retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
3003                if (retcode >= SS_SUCCESS) {
3004                        if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
3005                                "set_n_write from invalidate", BM_LOCKED_MASK))
3006                                retcode = ERR_IO_MD_DISK;
3007                }
3008        } else
3009                retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
3010        drbd_resume_io(device);
3011        mutex_unlock(&adm_ctx.resource->adm_mutex);
3012        put_ldev(device);
3013out:
3014        drbd_adm_finish(&adm_ctx, info, retcode);
3015        return 0;
3016}
3017
3018static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
3019                union drbd_state mask, union drbd_state val)
3020{
3021        struct drbd_config_context adm_ctx;
3022        enum drbd_ret_code retcode;
3023
3024        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3025        if (!adm_ctx.reply_skb)
3026                return retcode;
3027        if (retcode != NO_ERROR)
3028                goto out;
3029
3030        mutex_lock(&adm_ctx.resource->adm_mutex);
3031        retcode = drbd_request_state(adm_ctx.device, mask, val);
3032        mutex_unlock(&adm_ctx.resource->adm_mutex);
3033out:
3034        drbd_adm_finish(&adm_ctx, info, retcode);
3035        return 0;
3036}
3037
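    /* Set all bits in the bitmap and suspend activity log updates. */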
3038static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3039{
3040        int rv;
3041
3042        rv = drbd_bmio_set_n_write(device);
3043        drbd_suspend_al(device);
3044        return rv;
3045}
3046
3047int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3048{
3049        struct drbd_config_context adm_ctx;
3050        int retcode; /* drbd_ret_code, drbd_state_rv */
3051        struct drbd_device *device;
3052
3053        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3054        if (!adm_ctx.reply_skb)
3055                return retcode;
3056        if (retcode != NO_ERROR)
3057                goto out;
3058
3059        device = adm_ctx.device;
3060        if (!get_ldev(device)) {
3061                retcode = ERR_NO_DISK;
3062                goto out;
3063        }
3064
3065        mutex_lock(&adm_ctx.resource->adm_mutex);
3066
3067        /* If there is still bitmap IO pending, probably because of a previous
3068         * resync just being finished, wait for it before requesting a new resync.
3069         * Also wait for its after_state_ch(). */
3070        drbd_suspend_io(device);
3071        wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3072        drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3073
3074        /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3075         * in the bitmap.  Otherwise, try to start a resync handshake
3076         * as sync source for full sync.
3077         */
3078        if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3079                /* The peer will get a resync upon connect anyway. Just make that
3080                   into a full resync. */
3081                retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3082                if (retcode >= SS_SUCCESS) {
3083                        if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3084                                "set_n_write from invalidate_peer",
3085                                BM_LOCKED_SET_ALLOWED))
3086                                retcode = ERR_IO_MD_DISK;
3087                }
3088        } else
3089                retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3090        drbd_resume_io(device);
3091        mutex_unlock(&adm_ctx.resource->adm_mutex);
3092        put_ldev(device);
3093out:
3094        drbd_adm_finish(&adm_ctx, info, retcode);
3095        return 0;
3096}
3097
3098int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3099{
3100        struct drbd_config_context adm_ctx;
3101        enum drbd_ret_code retcode;
3102
3103        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3104        if (!adm_ctx.reply_skb)
3105                return retcode;
3106        if (retcode != NO_ERROR)
3107                goto out;
3108
3109        mutex_lock(&adm_ctx.resource->adm_mutex);
3110        if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3111                retcode = ERR_PAUSE_IS_SET;
3112        mutex_unlock(&adm_ctx.resource->adm_mutex);
3113out:
3114        drbd_adm_finish(&adm_ctx, info, retcode);
3115        return 0;
3116}
3117
3118int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3119{
3120        struct drbd_config_context adm_ctx;
3121        union drbd_dev_state s;
3122        enum drbd_ret_code retcode;
3123
3124        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3125        if (!adm_ctx.reply_skb)
3126                return retcode;
3127        if (retcode != NO_ERROR)
3128                goto out;
3129
3130        mutex_lock(&adm_ctx.resource->adm_mutex);
3131        if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3132                s = adm_ctx.device->state;
3133                if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3134                        retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3135                                  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3136                } else {
3137                        retcode = ERR_PAUSE_IS_CLEAR;
3138                }
3139        }
3140        mutex_unlock(&adm_ctx.resource->adm_mutex);
3141out:
3142        drbd_adm_finish(&adm_ctx, info, retcode);
3143        return 0;
3144}
3145
3146int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3147{
3148        return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3149}
3150
3151int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3152{
3153        struct drbd_config_context adm_ctx;
3154        struct drbd_device *device;
3155        int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
3156
3157        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3158        if (!adm_ctx.reply_skb)
3159                return retcode;
3160        if (retcode != NO_ERROR)
3161                goto out;
3162
3163        mutex_lock(&adm_ctx.resource->adm_mutex);
3164        device = adm_ctx.device;
3165        if (test_bit(NEW_CUR_UUID, &device->flags)) {
3166                if (get_ldev_if_state(device, D_ATTACHING)) {
3167                        drbd_uuid_new_current(device);
3168                        put_ldev(device);
3169                } else {
3170                        /* This is effectively a multi-stage "forced down".
3171                         * The NEW_CUR_UUID bit is supposedly only set if we
3172                         * lost the replication connection, and are configured
3173                         * to freeze IO and wait for some fence-peer handler.
3174                         * So we still don't have a replication connection.
3175                         * And now we don't have a local disk either.  After
3176                         * resume, we will fail all pending and new IO, because
3177                         * we don't have any data anymore.  Which means we will
3178                         * eventually be able to terminate all users of this
3179                         * device, and then take it down.  By bumping the
3180                         * "effective" data uuid, we make sure that you really
3181                         * need to tear down before you reconfigure; we will
3182                         * then refuse to re-connect or re-attach (because no
3183                         * matching real data uuid exists).
3184                         */
3185                        u64 val;
3186                        get_random_bytes(&val, sizeof(u64));
3187                        drbd_set_ed_uuid(device, val);
3188                        drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3189                }
3190                clear_bit(NEW_CUR_UUID, &device->flags);
3191        }
3192        drbd_suspend_io(device);
3193        retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3194        if (retcode == SS_SUCCESS) {
3195                if (device->state.conn < C_CONNECTED)
3196                        tl_clear(first_peer_device(device)->connection);
3197                if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3198                        tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3199        }
3200        drbd_resume_io(device);
3201        mutex_unlock(&adm_ctx.resource->adm_mutex);
3202out:
3203        drbd_adm_finish(&adm_ctx, info, retcode);
3204        return 0;
3205}
3206
3207int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3208{
3209        return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3210}
3211
3212static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3213                                    struct drbd_resource *resource,
3214                                    struct drbd_connection *connection,
3215                                    struct drbd_device *device)
3216{
3217        struct nlattr *nla;
3218        nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
3219        if (!nla)
3220                goto nla_put_failure;
3221        if (device &&
3222            nla_put_u32(skb, T_ctx_volume, device->vnr))
3223                goto nla_put_failure;
3224        if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3225                goto nla_put_failure;
3226        if (connection) {
3227                if (connection->my_addr_len &&
3228                    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3229                        goto nla_put_failure;
3230                if (connection->peer_addr_len &&
3231                    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3232                        goto nla_put_failure;
3233        }
3234        nla_nest_end(skb, nla);
3235        return 0;
3236
3237nla_put_failure:
3238        if (nla)
3239                nla_nest_cancel(skb, nla);
3240        return -EMSGSIZE;
3241}
3242
3243/*
3244 * The generic netlink dump callbacks are called outside the genl_lock(), so
3245 * they cannot use the simple attribute parsing code which uses global
3246 * attribute tables.
3247 */
3248static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3249{
3250        const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3251        const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3252        struct nlattr *nla;
3253
3254        nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3255                       DRBD_NLA_CFG_CONTEXT);
3256        if (!nla)
3257                return NULL;
3258        return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3259}
3260
3261static void resource_to_info(struct resource_info *, struct drbd_resource *);
3262
3263int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3264{
3265        struct drbd_genlmsghdr *dh;
3266        struct drbd_resource *resource;
3267        struct resource_info resource_info;
3268        struct resource_statistics resource_statistics;
3269        int err;
3270
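            /* cb->args[0] remembers the resource we stopped at, so a follow-up
             * dump call can resume right after it. */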
3271        rcu_read_lock();
3272        if (cb->args[0]) {
3273                for_each_resource_rcu(resource, &drbd_resources)
3274                        if (resource == (struct drbd_resource *)cb->args[0])
3275                                goto found_resource;
3276                err = 0;  /* resource was probably deleted */
3277                goto out;
3278        }
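            /* Start at the list head so the _continue_rcu loop below begins
             * with the first resource. */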
3279        resource = list_entry(&drbd_resources,
3280                              struct drbd_resource, resources);
3281
3282found_resource:
3283        list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3284                goto put_result;
3285        }
3286        err = 0;
3287        goto out;
3288
3289put_result:
3290        dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3291                        cb->nlh->nlmsg_seq, &drbd_genl_family,
3292                        NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3293        err = -ENOMEM;
3294        if (!dh)
3295                goto out;
3296        dh->minor = -1U;
3297        dh->ret_code = NO_ERROR;
3298        err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3299        if (err)
3300                goto out;
3301        err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3302        if (err)
3303                goto out;
3304        resource_to_info(&resource_info, resource);
3305        err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3306        if (err)
3307                goto out;
3308        resource_statistics.res_stat_write_ordering = resource->write_ordering;
3309        err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3310        if (err)
3311                goto out;
3312        cb->args[0] = (long)resource;
3313        genlmsg_end(skb, dh);
3314        err = 0;
3315
3316out:
3317        rcu_read_unlock();
3318        if (err)
3319                return err;
3320        return skb->len;
3321}
3322
3323static void device_to_statistics(struct device_statistics *s,
3324                                 struct drbd_device *device)
3325{
3326        memset(s, 0, sizeof(*s));
3327        s->dev_upper_blocked = !may_inc_ap_bio(device);
3328        if (get_ldev(device)) {
3329                struct drbd_md *md = &device->ldev->md;
3330                u64 *history_uuids = (u64 *)s->history_uuids;
3331                struct request_queue *q;
3332                int n;
3333
3334                spin_lock_irq(&md->uuid_lock);
3335                s->dev_current_uuid = md->uuid[UI_CURRENT];
3336                BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3337                for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3338                        history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3339                for (; n < HISTORY_UUIDS; n++)
3340                        history_uuids[n] = 0;
3341                s->history_uuids_len = HISTORY_UUIDS;
3342                spin_unlock_irq(&md->uuid_lock);
3343
3344                s->dev_disk_flags = md->flags;
3345                q = bdev_get_queue(device->ldev->backing_bdev);
3346                s->dev_lower_blocked =
3347                        bdi_congested(q->backing_dev_info,
3348                                      (1 << WB_async_congested) |
3349                                      (1 << WB_sync_congested));
3350                put_ldev(device);
3351        }
3352        s->dev_size = drbd_get_capacity(device->this_bdev);
3353        s->dev_read = device->read_cnt;
3354        s->dev_write = device->writ_cnt;
3355        s->dev_al_writes = device->al_writ_cnt;
3356        s->dev_bm_writes = device->bm_writ_cnt;
3357        s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3358        s->dev_lower_pending = atomic_read(&device->local_cnt);
3359        s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3360        s->dev_exposed_data_uuid = device->ed_uuid;
3361}
3362
3363static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3364{
3365        if (cb->args[0]) {
3366                struct drbd_resource *resource =
3367                        (struct drbd_resource *)cb->args[0];
3368                kref_put(&resource->kref, drbd_destroy_resource);
3369        }
3370
3371        return 0;
3372}
3373
3374int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
3375        return put_resource_in_arg0(cb, 7);
3376}
3377
3378static void device_to_info(struct device_info *, struct drbd_device *);
3379
3380int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3381{
3382        struct nlattr *resource_filter;
3383        struct drbd_resource *resource;
3384        struct drbd_device *uninitialized_var(device);
3385        int minor, err, retcode;
3386        struct drbd_genlmsghdr *dh;
3387        struct device_info device_info;
3388        struct device_statistics device_statistics;
3389        struct idr *idr_to_search;
3390
3391        resource = (struct drbd_resource *)cb->args[0];
3392        if (!cb->args[0] && !cb->args[1]) {
3393                resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3394                if (resource_filter) {
3395                        retcode = ERR_RES_NOT_KNOWN;
3396                        resource = drbd_find_resource(nla_data(resource_filter));
3397                        if (!resource)
3398                                goto put_result;
3399                        cb->args[0] = (long)resource;
3400                }
3401        }
3402
3403        rcu_read_lock();
3404        minor = cb->args[1];
3405        idr_to_search = resource ? &resource->devices : &drbd_devices;
3406        device = idr_get_next(idr_to_search, &minor);
3407        if (!device) {
3408                err = 0;
3409                goto out;
3410        }
3411        idr_for_each_entry_continue(idr_to_search, device, minor) {
3412                retcode = NO_ERROR;
3413                goto put_result;  /* only one iteration */
3414        }
3415        err = 0;
3416        goto out;  /* no more devices */
3417
3418put_result:
3419        dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3420                        cb->nlh->nlmsg_seq, &drbd_genl_family,
3421                        NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3422        err = -ENOMEM;
3423        if (!dh)
3424                goto out;
3425        dh->ret_code = retcode;
3426        dh->minor = -1U;
3427        if (retcode == NO_ERROR) {
3428                dh->minor = device->minor;
3429                err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3430                if (err)
3431                        goto out;
3432                if (get_ldev(device)) {
3433                        struct disk_conf *disk_conf =
3434                                rcu_dereference(device->ldev->disk_conf);
3435
3436                        err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3437                        put_ldev(device);
3438                        if (err)
3439                                goto out;
3440                }
3441                device_to_info(&device_info, device);
3442                err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3443                if (err)
3444                        goto out;
3445
3446                device_to_statistics(&device_statistics, device);
3447                err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3448                if (err)
3449                        goto out;
3450                cb->args[1] = minor + 1;
3451        }
3452        genlmsg_end(skb, dh);
3453        err = 0;
3454
3455out:
3456        rcu_read_unlock();
3457        if (err)
3458                return err;
3459        return skb->len;
3460}
3461
3462int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3463{
3464        return put_resource_in_arg0(cb, 6);
3465}
3466
3467enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3468
3469int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3470{
3471        struct nlattr *resource_filter;
3472        struct drbd_resource *resource = NULL, *next_resource;
3473        struct drbd_connection *uninitialized_var(connection);
3474        int err = 0, retcode;
3475        struct drbd_genlmsghdr *dh;
3476        struct connection_info connection_info;
3477        struct connection_statistics connection_statistics;
3478
3479        rcu_read_lock();
3480        resource = (struct drbd_resource *)cb->args[0];
3481        if (!cb->args[0]) {
3482                resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3483                if (resource_filter) {
3484                        retcode = ERR_RES_NOT_KNOWN;
3485                        resource = drbd_find_resource(nla_data(resource_filter));
3486                        if (!resource)
3487                                goto put_result;
3488                        cb->args[0] = (long)resource;
3489                        cb->args[1] = SINGLE_RESOURCE;
3490                }
3491        }
3492        if (!resource) {
3493                if (list_empty(&drbd_resources))
3494                        goto out;
3495                resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3496                kref_get(&resource->kref);
3497                cb->args[0] = (long)resource;
3498                cb->args[1] = ITERATE_RESOURCES;
3499        }
3500
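            /* We must not sleep under rcu_read_lock(); drop it around
             * mutex_lock() and take it again afterwards. */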
3501    next_resource:
3502        rcu_read_unlock();
3503        mutex_lock(&resource->conf_update);
3504        rcu_read_lock();
3505        if (cb->args[2]) {
3506                for_each_connection_rcu(connection, resource)
3507                        if (connection == (struct drbd_connection *)cb->args[2])
3508                                goto found_connection;
3509                /* connection was probably deleted */
3510                goto no_more_connections;
3511        }
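            /* Point "connection" at the list head so the _continue_rcu loop
             * below starts with the first entry. */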
3512        connection = list_entry(&resource->connections, struct drbd_connection, connections);
3513
3514found_connection:
3515        list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3516                if (!has_net_conf(connection))
3517                        continue;
3518                retcode = NO_ERROR;
3519                goto put_result;  /* only one iteration */
3520        }
3521
3522no_more_connections:
3523        if (cb->args[1] == ITERATE_RESOURCES) {
3524                for_each_resource_rcu(next_resource, &drbd_resources) {
3525                        if (next_resource == resource)
3526                                goto found_resource;
3527                }
3528                /* resource was probably deleted */
3529        }
3530        goto out;
3531
3532found_resource:
3533        list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3534                mutex_unlock(&resource->conf_update);
3535                kref_put(&resource->kref, drbd_destroy_resource);
3536                resource = next_resource;
3537                kref_get(&resource->kref);
3538                cb->args[0] = (long)resource;
3539                cb->args[2] = 0;
3540                goto next_resource;
3541        }
3542        goto out;  /* no more resources */
3543
3544put_result:
3545        dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3546                        cb->nlh->nlmsg_seq, &drbd_genl_family,
3547                        NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3548        err = -ENOMEM;
3549        if (!dh)
3550                goto out;
3551        dh->ret_code = retcode;
3552        dh->minor = -1U;
3553        if (retcode == NO_ERROR) {
3554                struct net_conf *net_conf;
3555
3556                err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3557                if (err)
3558                        goto out;
3559                net_conf = rcu_dereference(connection->net_conf);
3560                if (net_conf) {
3561                        err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3562                        if (err)
3563                                goto out;
3564                }
3565                connection_to_info(&connection_info, connection);
3566                err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3567                if (err)
3568                        goto out;
3569                connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3570                err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3571                if (err)
3572                        goto out;
3573                cb->args[2] = (long)connection;
3574        }
3575        genlmsg_end(skb, dh);
3576        err = 0;
3577
3578out:
3579        rcu_read_unlock();
3580        if (resource)
3581                mutex_unlock(&resource->conf_update);
3582        if (err)
3583                return err;
3584        return skb->len;
3585}
3586
3587enum mdf_peer_flag {
3588        MDF_PEER_CONNECTED =    1 << 0,
3589        MDF_PEER_OUTDATED =     1 << 1,
3590        MDF_PEER_FENCING =      1 << 2,
3591        MDF_PEER_FULL_SYNC =    1 << 3,
3592};
3593
3594static void peer_device_to_statistics(struct peer_device_statistics *s,
3595                                      struct drbd_peer_device *peer_device)
3596{
3597        struct drbd_device *device = peer_device->device;
3598
3599        memset(s, 0, sizeof(*s));
3600        s->peer_dev_received = device->recv_cnt;
3601        s->peer_dev_sent = device->send_cnt;
3602        s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3603                              atomic_read(&device->rs_pending_cnt);
3604        s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
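            /* Shift by (BM_BLOCK_SHIFT - 9) to convert bitmap blocks into 512-byte sectors. */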
3605        s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3606        s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3607        if (get_ldev(device)) {
3608                struct drbd_md *md = &device->ldev->md;
3609
3610                spin_lock_irq(&md->uuid_lock);
3611                s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3612                spin_unlock_irq(&md->uuid_lock);
3613                s->peer_dev_flags =
3614                        (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3615                                MDF_PEER_CONNECTED : 0) +
3616                        (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3617                         !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3618                                MDF_PEER_OUTDATED : 0) +
3619                        /* FIXME: MDF_PEER_FENCING? */
3620                        (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3621                                MDF_PEER_FULL_SYNC : 0);
3622                put_ldev(device);
3623        }
3624}
3625
3626int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3627{
3628        return put_resource_in_arg0(cb, 9);
3629}
3630
3631int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3632{
3633        struct nlattr *resource_filter;
3634        struct drbd_resource *resource;
3635        struct drbd_device *uninitialized_var(device);
3636        struct drbd_peer_device *peer_device = NULL;
3637        int minor, err, retcode;
3638        struct drbd_genlmsghdr *dh;
3639        struct idr *idr_to_search;
3640
3641        resource = (struct drbd_resource *)cb->args[0];
3642        if (!cb->args[0] && !cb->args[1]) {
3643                resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3644                if (resource_filter) {
3645                        retcode = ERR_RES_NOT_KNOWN;
3646                        resource = drbd_find_resource(nla_data(resource_filter));
3647                        if (!resource)
3648                                goto put_result;
3649                }
3650                cb->args[0] = (long)resource;
3651        }
3652
3653        rcu_read_lock();
3654        minor = cb->args[1];
3655        idr_to_search = resource ? &resource->devices : &drbd_devices;
3656        device = idr_find(idr_to_search, minor);
3657        if (!device) {
3658next_device:
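                    /* Either this minor is gone or its peer devices are exhausted:
                     * advance to the next existing minor and reset the peer
                     * device cursor in cb->args[2]. */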
3659                minor++;
3660                cb->args[2] = 0;
3661                device = idr_get_next(idr_to_search, &minor);
3662                if (!device) {
3663                        err = 0;
3664                        goto out;
3665                }
3666        }
3667        if (cb->args[2]) {
3668                for_each_peer_device(peer_device, device)
3669                        if (peer_device == (struct drbd_peer_device *)cb->args[2])
3670                                goto found_peer_device;
3671                /* peer device was probably deleted */
3672                goto next_device;
3673        }
3674        /* Make peer_device point to the list head (not the first entry). */
3675        peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3676
3677found_peer_device:
3678        list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3679                if (!has_net_conf(peer_device->connection))
3680                        continue;
3681                retcode = NO_ERROR;
3682                goto put_result;  /* only one iteration */
3683        }
3684        goto next_device;
3685
3686put_result:
3687        dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3688                        cb->nlh->nlmsg_seq, &drbd_genl_family,
3689                        NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3690        err = -ENOMEM;
3691        if (!dh)
3692                goto out;
3693        dh->ret_code = retcode;
3694        dh->minor = -1U;
3695        if (retcode == NO_ERROR) {
3696                struct peer_device_info peer_device_info;
3697                struct peer_device_statistics peer_device_statistics;
3698
3699                dh->minor = minor;
3700                err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3701                if (err)
3702                        goto out;
3703                peer_device_to_info(&peer_device_info, peer_device);
3704                err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3705                if (err)
3706                        goto out;
3707                peer_device_to_statistics(&peer_device_statistics, peer_device);
3708                err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3709                if (err)
3710                        goto out;
3711                cb->args[1] = minor;
3712                cb->args[2] = (long)peer_device;
3713        }
3714        genlmsg_end(skb, dh);
3715        err = 0;
3716
3717out:
3718        rcu_read_unlock();
3719        if (err)
3720                return err;
3721        return skb->len;
3722}
3723/*
3724 * Return the connection of @resource if @resource has exactly one connection.
3725 */
3726static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3727{
3728        struct list_head *connections = &resource->connections;
3729
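            /* With exactly one entry, following two ->next pointers leads back
             * to the list head. */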
3730        if (list_empty(connections) || connections->next->next != connections)
3731                return NULL;
3732        return list_first_entry(&resource->connections, struct drbd_connection, connections);
3733}
3734
3735static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3736                const struct sib_info *sib)
3737{
3738        struct drbd_resource *resource = device->resource;
3739        struct state_info *si = NULL; /* for sizeof(si->member); */
3740        struct nlattr *nla;
3741        int got_ldev;
3742        int err = 0;
3743        int exclude_sensitive;
3744
3745        /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3746         * to.  So we better exclude_sensitive information.
3747         *
3748         * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3749         * in the context of the requesting user process. Exclude sensitive
3750         * information, unless current has superuser.
3751         *
3752         * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3753         * relies on the current implementation of netlink_dump(), which
3754         * executes the dump callback successively from netlink_recvmsg(),
3755         * always in the context of the receiving process */
3756        exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3757
3758        got_ldev = get_ldev(device);
3759
3760        /* We still need to add connection name and volume number information.
3761         * Minor number is in drbd_genlmsghdr. */
3762        if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3763                goto nla_put_failure;
3764
3765        if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3766                goto nla_put_failure;
3767
3768        rcu_read_lock();
3769        if (got_ldev) {
3770                struct disk_conf *disk_conf;
3771
3772                disk_conf = rcu_dereference(device->ldev->disk_conf);
3773                err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3774        }
3775        if (!err) {
3776                struct net_conf *nc;
3777
3778                nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3779                if (nc)
3780                        err = net_conf_to_skb(skb, nc, exclude_sensitive);
3781        }
3782        rcu_read_unlock();
3783        if (err)
3784                goto nla_put_failure;
3785
3786        nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
3787        if (!nla)
3788                goto nla_put_failure;
3789        if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3790            nla_put_u32(skb, T_current_state, device->state.i) ||
3791            nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3792            nla_put_u64_0pad(skb, T_capacity,
3793                             drbd_get_capacity(device->this_bdev)) ||
3794            nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3795            nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3796            nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3797            nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3798            nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3799            nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3800            nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3801            nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3802            nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3803                goto nla_put_failure;
3804
3805        if (got_ldev) {
3806                int err;
3807
3808                spin_lock_irq(&device->ldev->md.uuid_lock);
3809                err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3810                spin_unlock_irq(&device->ldev->md.uuid_lock);
3811
3812                if (err)
3813                        goto nla_put_failure;
3814
3815                if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3816                    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3817                    nla_put_u64_0pad(skb, T_bits_oos,
3818                                     drbd_bm_total_weight(device)))
3819                        goto nla_put_failure;
3820                if (C_SYNC_SOURCE <= device->state.conn &&
3821                    C_PAUSED_SYNC_T >= device->state.conn) {
3822                        if (nla_put_u64_0pad(skb, T_bits_rs_total,
3823                                             device->rs_total) ||
3824                            nla_put_u64_0pad(skb, T_bits_rs_failed,
3825                                             device->rs_failed))
3826                                goto nla_put_failure;
3827                }
3828        }
3829
3830        if (sib) {
3831                switch(sib->sib_reason) {
3832                case SIB_SYNC_PROGRESS:
3833                case SIB_GET_STATUS_REPLY:
3834                        break;
3835                case SIB_STATE_CHANGE:
3836                        if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3837                            nla_put_u32(skb, T_new_state, sib->ns.i))
3838                                goto nla_put_failure;
3839                        break;
3840                case SIB_HELPER_POST:
3841                        if (nla_put_u32(skb, T_helper_exit_code,
3842                                        sib->helper_exit_code))
3843                                goto nla_put_failure;
3844                        /* fall through */
3845                case SIB_HELPER_PRE:
3846                        if (nla_put_string(skb, T_helper, sib->helper_name))
3847                                goto nla_put_failure;
3848                        break;
3849                }
3850        }
3851        nla_nest_end(skb, nla);
3852
3853        if (0)
3854nla_put_failure:
3855                err = -EMSGSIZE;
3856        if (got_ldev)
3857                put_ldev(device);
3858        return err;
3859}
3860
3861int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3862{
3863        struct drbd_config_context adm_ctx;
3864        enum drbd_ret_code retcode;
3865        int err;
3866
3867        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3868        if (!adm_ctx.reply_skb)
3869                return retcode;
3870        if (retcode != NO_ERROR)
3871                goto out;
3872
3873        err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3874        if (err) {
3875                nlmsg_free(adm_ctx.reply_skb);
3876                return err;
3877        }
3878out:
3879        drbd_adm_finish(&adm_ctx, info, retcode);
3880        return 0;
3881}
3882
3883static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3884{
3885        struct drbd_device *device;
3886        struct drbd_genlmsghdr *dh;
3887        struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3888        struct drbd_resource *resource = NULL;
3889        struct drbd_resource *tmp;
3890        unsigned volume = cb->args[1];
3891
3892        /* Open-coded, deferred iteration:
3893         * for_each_resource_safe(resource, tmp, &drbd_resources) {
3894         *      connection = "first connection of resource or undefined";
3895         *      idr_for_each_entry(&resource->devices, device, i) {
3896         *        ...
3897         *      }
3898         * }
3899         * where resource is cb->args[0];
3900         * and i is cb->args[1];
3901         *
3902         * cb->args[2] indicates if we shall loop over all resources,
3903         * or just dump all volumes of a single resource.
3904         *
3905         * This may miss entries inserted after this dump started,
3906         * or entries deleted before they are reached.
3907         *
3908         * We need to make sure the device won't disappear while
3909         * we are looking at it, and revalidate our iterators
3910         * on each iteration.
3911         */
3912
3913        /* synchronize with conn_create()/drbd_destroy_connection() */
3914        rcu_read_lock();
3915        /* revalidate iterator position */
3916        for_each_resource_rcu(tmp, &drbd_resources) {
3917                if (pos == NULL) {
3918                        /* first iteration */
3919                        pos = tmp;
3920                        resource = pos;
3921                        break;
3922                }
3923                if (tmp == pos) {
3924                        resource = pos;
3925                        break;
3926                }
3927        }
3928        if (resource) {
3929next_resource:
3930                device = idr_get_next(&resource->devices, &volume);
3931                if (!device) {
3932                        /* No more volumes to dump on this resource.
3933                         * Advance resource iterator. */
3934                        pos = list_entry_rcu(resource->resources.next,
3935                                             struct drbd_resource, resources);
3936                        /* Did we dump any volume of this resource yet? */
3937                        if (volume != 0) {
3938                                /* If we reached the end of the list,
3939                                 * or only a single resource dump was requested,
3940                                 * we are done. */
3941                                if (&pos->resources == &drbd_resources || cb->args[2])
3942                                        goto out;
3943                                volume = 0;
3944                                resource = pos;
3945                                goto next_resource;
3946                        }
3947                }
3948
3949                dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3950                                cb->nlh->nlmsg_seq, &drbd_genl_family,
3951                                NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3952                if (!dh)
3953                        goto out;
3954
3955                if (!device) {
3956                        /* This is a connection without a single volume.
3957                         * Surprisingly enough, it may have a network
3958                         * configuration. */
3959                        struct drbd_connection *connection;
3960
3961                        dh->minor = -1U;
3962                        dh->ret_code = NO_ERROR;
3963                        connection = the_only_connection(resource);
3964                        if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3965                                goto cancel;
3966                        if (connection) {
3967                                struct net_conf *nc;
3968
3969                                nc = rcu_dereference(connection->net_conf);
3970                                if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3971                                        goto cancel;
3972                        }
3973                        goto done;
3974                }
3975
3976                D_ASSERT(device, device->vnr == volume);
3977                D_ASSERT(device, device->resource == resource);
3978
3979                dh->minor = device_to_minor(device);
3980                dh->ret_code = NO_ERROR;
3981
3982                if (nla_put_status_info(skb, device, NULL)) {
3983cancel:
3984                        genlmsg_cancel(skb, dh);
3985                        goto out;
3986                }
3987done:
3988                genlmsg_end(skb, dh);
3989        }
3990
3991out:
3992        rcu_read_unlock();
3993        /* where to start the next iteration */
3994        cb->args[0] = (long)pos;
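            /* If we are still on the same resource, resume with the next
             * volume; if the iterator advanced, start again at volume 0. */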
3995        cb->args[1] = (pos == resource) ? volume + 1 : 0;
3996
3997        /* No more resources/volumes/minors found results in an empty skb,
3998         * which will terminate the dump. */
3999        return skb->len;
4000}
4001
4002/*
4003 * Request status of all resources, or of all volumes within a single resource.
4004 *
4005 * This is a dump, as the answer may not fit in a single reply skb otherwise.
4006 * This means we cannot use the family->attrbuf or other such members, because
4007 * dump is NOT protected by the genl_lock().  During dump, we only have access
4008 * to the incoming skb, and need to open-code "parsing" of the nlattr payload.
4009 *
4010 * Once things are setup properly, we call into get_one_status().
4011 */
4012int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
4013{
4014        const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
4015        struct nlattr *nla;
4016        const char *resource_name;
4017        struct drbd_resource *resource;
4018        int maxtype;
4019
4020        /* Is this a followup call? */
4021        if (cb->args[0]) {
4022                /* ... of a single resource dump,
4023                 * and the resource iterator has been advanced already? */
4024                if (cb->args[2] && cb->args[2] != cb->args[0])
4025                        return 0; /* DONE. */
4026                goto dump;
4027        }
4028
4029        /* First call (from netlink_dump_start).  We need to figure out
4030         * which resource(s) the user wants us to dump. */
4031        nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
4032                        nlmsg_attrlen(cb->nlh, hdrlen),
4033                        DRBD_NLA_CFG_CONTEXT);
4034
4035        /* No explicit context given.  Dump all. */
4036        if (!nla)
4037                goto dump;
4038        maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4039        nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4040        if (IS_ERR(nla))
4041                return PTR_ERR(nla);
4042        /* context given, but no name present? */
4043        if (!nla)
4044                return -EINVAL;
4045        resource_name = nla_data(nla);
4046        if (!*resource_name)
4047                return -ENODEV;
4048        resource = drbd_find_resource(resource_name);
4049        if (!resource)
4050                return -ENODEV;
4051
4052        kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
4053
4054        /* prime iterators, and set "filter" mode mark:
4055         * only dump this connection. */
4056        cb->args[0] = (long)resource;
4057        /* cb->args[1] = 0; passed in this way. */
4058        cb->args[2] = (long)resource;
4059
4060dump:
4061        return get_one_status(skb, cb);
4062}
4063
4064int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
4065{
4066        struct drbd_config_context adm_ctx;
4067        enum drbd_ret_code retcode;
4068        struct timeout_parms tp;
4069        int err;
4070
4071        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4072        if (!adm_ctx.reply_skb)
4073                return retcode;
4074        if (retcode != NO_ERROR)
4075                goto out;
4076
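            /* An outdated peer disk takes precedence over the degraded-node
             * (USE_DEGR_WFC_T) case, which takes precedence over the default
             * timeout. */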
4077        tp.timeout_type =
4078                adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4079                test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
4080                UT_DEFAULT;
4081
4082        err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4083        if (err) {
4084                nlmsg_free(adm_ctx.reply_skb);
4085                return err;
4086        }
4087out:
4088        drbd_adm_finish(&adm_ctx, info, retcode);
4089        return 0;
4090}
4091
4092int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
4093{
4094        struct drbd_config_context adm_ctx;
4095        struct drbd_device *device;
4096        enum drbd_ret_code retcode;
4097        struct start_ov_parms parms;
4098
4099        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4100        if (!adm_ctx.reply_skb)
4101                return retcode;
4102        if (retcode != NO_ERROR)
4103                goto out;
4104
4105        device = adm_ctx.device;
4106
4107        /* resume from last known position, if possible */
4108        parms.ov_start_sector = device->ov_start_sector;
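            /* ULLONG_MAX: no explicit stop sector, verify up to the end of
             * the device unless the request overrides it below. */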
4109        parms.ov_stop_sector = ULLONG_MAX;
4110        if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
4111                int err = start_ov_parms_from_attrs(&parms, info);
4112                if (err) {
4113                        retcode = ERR_MANDATORY_TAG;
4114                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4115                        goto out;
4116                }
4117        }
4118        mutex_lock(&adm_ctx.resource->adm_mutex);
4119
4120        /* w_make_ov_request expects position to be aligned */
4121        device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4122        device->ov_stop_sector = parms.ov_stop_sector;
4123
4124        /* If there is still bitmap IO pending, e.g. previous resync or verify
4125         * just being finished, wait for it before requesting a new resync. */
4126        drbd_suspend_io(device);
4127        wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4128        retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4129        drbd_resume_io(device);
4130
4131        mutex_unlock(&adm_ctx.resource->adm_mutex);
4132out:
4133        drbd_adm_finish(&adm_ctx, info, retcode);
4134        return 0;
4135}
4136
4137
4138int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
4139{
4140        struct drbd_config_context adm_ctx;
4141        struct drbd_device *device;
4142        enum drbd_ret_code retcode;
4143        int skip_initial_sync = 0;
4144        int err;
4145        struct new_c_uuid_parms args;
4146
4147        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4148        if (!adm_ctx.reply_skb)
4149                return retcode;
4150        if (retcode != NO_ERROR)
4151                goto out_nolock;
4152
4153        device = adm_ctx.device;
4154        memset(&args, 0, sizeof(args));
4155        if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
4156                err = new_c_uuid_parms_from_attrs(&args, info);
4157                if (err) {
4158                        retcode = ERR_MANDATORY_TAG;
4159                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4160                        goto out_nolock;
4161                }
4162        }
4163
4164        mutex_lock(&adm_ctx.resource->adm_mutex);
4165        mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
4166
4167        if (!get_ldev(device)) {
4168                retcode = ERR_NO_DISK;
4169                goto out;
4170        }
4171
4172        /* this is "skip initial sync", assume the data to be clean */
4173        if (device->state.conn == C_CONNECTED &&
4174            first_peer_device(device)->connection->agreed_pro_version >= 90 &&
4175            device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
4176                drbd_info(device, "Preparing to skip initial sync\n");
4177                skip_initial_sync = 1;
4178        } else if (device->state.conn != C_STANDALONE) {
4179                retcode = ERR_CONNECTED;
4180                goto out_dec;
4181        }
4182
4183        drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4184        drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
4185
4186        if (args.clear_bm) {
4187                err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4188                        "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
4189                if (err) {
4190                        drbd_err(device, "Writing bitmap failed with %d\n", err);
4191                        retcode = ERR_IO_MD_DISK;
4192                }
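                    /* Skipping the initial sync: tell the peer, clear the
                     * bitmap UUID, and go straight to UpToDate/UpToDate for
                     * both the local and the peer disk. */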
4193                if (skip_initial_sync) {
4194                        drbd_send_uuids_skip_initial_sync(first_peer_device(device));
4195                        _drbd_uuid_set(device, UI_BITMAP, 0);
4196                        drbd_print_uuids(device, "cleared bitmap UUID");
4197                        spin_lock_irq(&device->resource->req_lock);
4198                        _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4199                                        CS_VERBOSE, NULL);
4200                        spin_unlock_irq(&device->resource->req_lock);
4201                }
4202        }
4203
4204        drbd_md_sync(device);
4205out_dec:
4206        put_ldev(device);
4207out:
4208        mutex_unlock(device->state_mutex);
4209        mutex_unlock(&adm_ctx.resource->adm_mutex);
4210out_nolock:
4211        drbd_adm_finish(&adm_ctx, info, retcode);
4212        return 0;
4213}
4214
4215static enum drbd_ret_code
4216drbd_check_resource_name(struct drbd_config_context *adm_ctx)
4217{
4218        const char *name = adm_ctx->resource_name;
4219        if (!name || !name[0]) {
4220                drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
4221                return ERR_MANDATORY_TAG;
4222        }
4223        /* if we want to use these in sysfs/configfs/debugfs some day,
4224         * we must not allow slashes */
4225        if (strchr(name, '/')) {
4226                drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
4227                return ERR_INVALID_REQUEST;
4228        }
4229        return NO_ERROR;
4230}
4231
4232static void resource_to_info(struct resource_info *info,
4233                             struct drbd_resource *resource)
4234{
4235        info->res_role = conn_highest_role(first_connection(resource));
4236        info->res_susp = resource->susp;
4237        info->res_susp_nod = resource->susp_nod;
4238        info->res_susp_fen = resource->susp_fen;
4239}
4240
4241int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
4242{
4243        struct drbd_connection *connection;
4244        struct drbd_config_context adm_ctx;
4245        enum drbd_ret_code retcode;
4246        struct res_opts res_opts;
4247        int err;
4248
4249        retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
4250        if (!adm_ctx.reply_skb)
4251                return retcode;
4252        if (retcode != NO_ERROR)
4253                goto out;
4254
4255        set_res_opts_defaults(&res_opts);
4256        err = res_opts_from_attrs(&res_opts, info);
4257        if (err && err != -ENOMSG) {
4258                retcode = ERR_MANDATORY_TAG;
4259                drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4260                goto out;
4261        }
4262
4263        retcode = drbd_check_resource_name(&adm_ctx);
4264        if (retcode != NO_ERROR)
4265                goto out;
4266
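            /* An already existing resource is only an error if the caller
             * insisted on exclusive creation (NLM_F_EXCL). */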
4267        if (adm_ctx.resource) {
4268                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4269                        retcode = ERR_INVALID_REQUEST;
4270                        drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
4271                }
4272                /* else: still NO_ERROR */
4273                goto out;
4274        }
4275
4276        /* not yet safe for genl_family.parallel_ops */
4277        mutex_lock(&resources_mutex);
4278        connection = conn_create(adm_ctx.resource_name, &res_opts);
4279        mutex_unlock(&resources_mutex);
4280
4281        if (connection) {
4282                struct resource_info resource_info;
4283
4284                mutex_lock(&notification_mutex);
4285                resource_to_info(&resource_info, connection->resource);
4286                notify_resource_state(NULL, 0, connection->resource,
4287                                      &resource_info, NOTIFY_CREATE);
4288                mutex_unlock(&notification_mutex);
4289        } else
4290                retcode = ERR_NOMEM;
4291
4292out:
4293        drbd_adm_finish(&adm_ctx, info, retcode);
4294        return 0;
4295}
4296
4297static void device_to_info(struct device_info *info,
4298                           struct drbd_device *device)
4299{
4300        info->dev_disk_state = device->state.disk;
4301}
4302
4303
4304int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
4305{
4306        struct drbd_config_context adm_ctx;
4307        struct drbd_genlmsghdr *dh = info->userhdr;
4308        enum drbd_ret_code retcode;
4309
4310        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4311        if (!adm_ctx.reply_skb)
4312                return retcode;
4313        if (retcode != NO_ERROR)
4314                goto out;
4315
4316        if (dh->minor > MINORMASK) {
4317                drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
4318                retcode = ERR_INVALID_REQUEST;
4319                goto out;
4320        }
4321        if (adm_ctx.volume > DRBD_VOLUME_MAX) {
4322                drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
4323                retcode = ERR_INVALID_REQUEST;
4324                goto out;
4325        }
4326
4327        /* drbd_adm_prepare made sure already
4328         * that first_peer_device(device)->connection and device->vnr match the request. */
4329        if (adm_ctx.device) {
4330                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
4331                        retcode = ERR_MINOR_OR_VOLUME_EXISTS;
4332                /* else: still NO_ERROR */
4333                goto out;
4334        }
4335
4336        mutex_lock(&adm_ctx.resource->adm_mutex);
4337        retcode = drbd_create_device(&adm_ctx, dh->minor);
4338        if (retcode == NO_ERROR) {
4339                struct drbd_device *device;
4340                struct drbd_peer_device *peer_device;
4341                struct device_info info;
4342                unsigned int peer_devices = 0;
4343                enum drbd_notification_type flags;
4344
4345                device = minor_to_device(dh->minor);
4346                for_each_peer_device(peer_device, device) {
4347                        if (!has_net_conf(peer_device->connection))
4348                                continue;
4349                        peer_devices++;
4350                }
4351
4352                device_to_info(&info, device);
4353                mutex_lock(&notification_mutex);
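                    /* All notifications of this group except the last carry
                     * NOTIFY_CONTINUES; peer_devices counts how many
                     * peer-device notifications still follow. */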
4354                flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4355                notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4356                for_each_peer_device(peer_device, device) {
4357                        struct peer_device_info peer_device_info;
4358
4359                        if (!has_net_conf(peer_device->connection))
4360                                continue;
4361                        peer_device_to_info(&peer_device_info, peer_device);
4362                        flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4363                        notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4364                                                 NOTIFY_CREATE | flags);
4365                }
4366                mutex_unlock(&notification_mutex);
4367        }
4368        mutex_unlock(&adm_ctx.resource->adm_mutex);
4369out:
4370        drbd_adm_finish(&adm_ctx, info, retcode);
4371        return 0;
4372}
4373
4374static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
4375{
4376        struct drbd_peer_device *peer_device;
4377
4378        if (device->state.disk == D_DISKLESS &&
4379            /* no need to be device->state.conn == C_STANDALONE &&
4380             * we may want to delete a minor from a live replication group.
4381             */
4382            device->state.role == R_SECONDARY) {
4383                struct drbd_connection *connection =
4384                        first_connection(device->resource);
4385
4386                _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
4387                                    CS_VERBOSE + CS_WAIT_COMPLETE);
4388
4389                /* If the state engine hasn't stopped the sender thread yet, we
4390                 * need to flush the sender work queue before generating the
4391                 * DESTROY events here. */
4392                if (get_t_state(&connection->worker) == RUNNING)
4393                        drbd_flush_workqueue(&connection->sender_work);
4394
4395                mutex_lock(&notification_mutex);
4396                for_each_peer_device(peer_device, device) {
4397                        if (!has_net_conf(peer_device->connection))
4398                                continue;
4399                        notify_peer_device_state(NULL, 0, peer_device, NULL,
4400                                                 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4401                }
4402                notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4403                mutex_unlock(&notification_mutex);
4404
4405                drbd_delete_device(device);
4406                return NO_ERROR;
4407        } else
4408                return ERR_MINOR_CONFIGURED;
4409}
4410
4411int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
4412{
4413        struct drbd_config_context adm_ctx;
4414        enum drbd_ret_code retcode;
4415
4416        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4417        if (!adm_ctx.reply_skb)
4418                return retcode;
4419        if (retcode != NO_ERROR)
4420                goto out;
4421
4422        mutex_lock(&adm_ctx.resource->adm_mutex);
4423        retcode = adm_del_minor(adm_ctx.device);
4424        mutex_unlock(&adm_ctx.resource->adm_mutex);
4425out:
4426        drbd_adm_finish(&adm_ctx, info, retcode);
4427        return 0;
4428}
4429
4430static int adm_del_resource(struct drbd_resource *resource)
4431{
4432        struct drbd_connection *connection;
4433
4434        for_each_connection(connection, resource) {
4435                if (connection->cstate > C_STANDALONE)
4436                        return ERR_NET_CONFIGURED;
4437        }
4438        if (!idr_is_empty(&resource->devices))
4439                return ERR_RES_IN_USE;
4440
4441        /* The state engine has stopped the sender thread, so we don't
4442         * need to flush the sender work queue before generating the
4443         * DESTROY event here. */
4444        mutex_lock(&notification_mutex);
4445        notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4446        mutex_unlock(&notification_mutex);
4447
4448        mutex_lock(&resources_mutex);
4449        list_del_rcu(&resource->resources);
4450        mutex_unlock(&resources_mutex);
4451        /* Make sure all threads have actually stopped: state handling only
4452         * does drbd_thread_stop_nowait(). */
4453        list_for_each_entry(connection, &resource->connections, connections)
4454                drbd_thread_stop(&connection->worker);
4455        synchronize_rcu();
4456        drbd_free_resource(resource);
4457        return NO_ERROR;
4458}
4459
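    /* Tear the whole resource down: demote all volumes to secondary,
     * disconnect, detach, delete the volumes, and finally delete the
     * resource itself. */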
4460int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
4461{
4462        struct drbd_config_context adm_ctx;
4463        struct drbd_resource *resource;
4464        struct drbd_connection *connection;
4465        struct drbd_device *device;
4466        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
4467        unsigned i;
4468
4469        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4470        if (!adm_ctx.reply_skb)
4471                return retcode;
4472        if (retcode != NO_ERROR)
4473                goto finish;
4474
4475        resource = adm_ctx.resource;
4476        mutex_lock(&resource->adm_mutex);
4477        /* demote */
4478        for_each_connection(connection, resource) {
4479                struct drbd_peer_device *peer_device;
4480
4481                idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4482                        retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4483                        if (retcode < SS_SUCCESS) {
4484                                drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
4485                                goto out;
4486                        }
4487                }
4488
4489                retcode = conn_try_disconnect(connection, 0);
4490                if (retcode < SS_SUCCESS) {
4491                        drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
4492                        goto out;
4493                }
4494        }
4495
4496        /* detach */
4497        idr_for_each_entry(&resource->devices, device, i) {
4498                retcode = adm_detach(device, 0);
4499                if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
4500                        drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
4501                        goto out;
4502                }
4503        }
4504
4505        /* delete volumes */
4506        idr_for_each_entry(&resource->devices, device, i) {
4507                retcode = adm_del_minor(device);
4508                if (retcode != NO_ERROR) {
4509                        /* "can not happen" */
4510                        drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
4511                        goto out;
4512                }
4513        }
4514
4515        retcode = adm_del_resource(resource);
4516out:
4517        mutex_unlock(&resource->adm_mutex);
4518finish:
4519        drbd_adm_finish(&adm_ctx, info, retcode);
4520        return 0;
4521}
4522
4523int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
4524{
4525        struct drbd_config_context adm_ctx;
4526        struct drbd_resource *resource;
4527        enum drbd_ret_code retcode;
4528
4529        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4530        if (!adm_ctx.reply_skb)
4531                return retcode;
4532        if (retcode != NO_ERROR)
4533                goto finish;
4534        resource = adm_ctx.resource;
4535
4536        mutex_lock(&resource->adm_mutex);
4537        retcode = adm_del_resource(resource);
4538        mutex_unlock(&resource->adm_mutex);
4539finish:
4540        drbd_adm_finish(&adm_ctx, info, retcode);
4541        return 0;
4542}
4543
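    /* Broadcast an asynchronous event for this device (described by sib)
     * to the DRBD events multicast group. */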
4544void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
4545{
4546        struct sk_buff *msg;
4547        struct drbd_genlmsghdr *d_out;
4548        unsigned seq;
4549        int err = -ENOMEM;
4550
4551        seq = atomic_inc_return(&drbd_genl_seq);
4552        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4553        if (!msg)
4554                goto failed;
4555
4556        err = -EMSGSIZE;
4557        d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4558        if (!d_out) /* cannot happen, but anyway. */
4559                goto nla_put_failure;
4560        d_out->minor = device_to_minor(device);
4561        d_out->ret_code = NO_ERROR;
4562
4563        if (nla_put_status_info(msg, device, sib))
4564                goto nla_put_failure;
4565        genlmsg_end(msg, d_out);
4566        err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
4567        /* msg has been consumed or freed in netlink_broadcast() */
4568        if (err && err != -ESRCH)
4569                goto failed;
4570
4571        return;
4572
4573nla_put_failure:
4574        nlmsg_free(msg);
4575failed:
4576        drbd_err(device, "Error %d while broadcasting event. "
4577                        "Event seq:%u sib_reason:%u\n",
4578                        err, seq, sib->sib_reason);
4579}
4580
4581static int nla_put_notification_header(struct sk_buff *msg,
4582                                       enum drbd_notification_type type)
4583{
4584        struct drbd_notification_header nh = {
4585                .nh_type = type,
4586        };
4587
4588        return drbd_notification_header_to_skb(msg, &nh, true);
4589}
4590
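    /* If called without an skb, allocate a new message and multicast it to
     * the DRBD events group; otherwise append the event to the given skb
     * (as done by the initial state dump).  The notify_*_state() helpers
     * below all follow this pattern. */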
4591void notify_resource_state(struct sk_buff *skb,
4592                           unsigned int seq,
4593                           struct drbd_resource *resource,
4594                           struct resource_info *resource_info,
4595                           enum drbd_notification_type type)
4596{
4597        struct resource_statistics resource_statistics;
4598        struct drbd_genlmsghdr *dh;
4599        bool multicast = false;
4600        int err;
4601
4602        if (!skb) {
4603                seq = atomic_inc_return(&notify_genl_seq);
4604                skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4605                err = -ENOMEM;
4606                if (!skb)
4607                        goto failed;
4608                multicast = true;
4609        }
4610
4611        err = -EMSGSIZE;
4612        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4613        if (!dh)
4614                goto nla_put_failure;
4615        dh->minor = -1U;
4616        dh->ret_code = NO_ERROR;
4617        if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4618            nla_put_notification_header(skb, type) ||
4619            ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4620             resource_info_to_skb(skb, resource_info, true)))
4621                goto nla_put_failure;
4622        resource_statistics.res_stat_write_ordering = resource->write_ordering;
4623        err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4624        if (err)
4625                goto nla_put_failure;
4626        genlmsg_end(skb, dh);
4627        if (multicast) {
4628                err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4629                /* skb has been consumed or freed in netlink_broadcast() */
4630                if (err && err != -ESRCH)
4631                        goto failed;
4632        }
4633        return;
4634
4635nla_put_failure:
4636        nlmsg_free(skb);
4637failed:
4638        drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4639                        err, seq);
4640}
4641
4642void notify_device_state(struct sk_buff *skb,
4643                         unsigned int seq,
4644                         struct drbd_device *device,
4645                         struct device_info *device_info,
4646                         enum drbd_notification_type type)
4647{
4648        struct device_statistics device_statistics;
4649        struct drbd_genlmsghdr *dh;
4650        bool multicast = false;
4651        int err;
4652
4653        if (!skb) {
4654                seq = atomic_inc_return(&notify_genl_seq);
4655                skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4656                err = -ENOMEM;
4657                if (!skb)
4658                        goto failed;
4659                multicast = true;
4660        }
4661
4662        err = -EMSGSIZE;
4663        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4664        if (!dh)
4665                goto nla_put_failure;
4666        dh->minor = device->minor;
4667        dh->ret_code = NO_ERROR;
4668        if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4669            nla_put_notification_header(skb, type) ||
4670            ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4671             device_info_to_skb(skb, device_info, true)))
4672                goto nla_put_failure;
4673        device_to_statistics(&device_statistics, device);
4674        device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4675        genlmsg_end(skb, dh);
4676        if (multicast) {
4677                err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4678                /* skb has been consumed or freed in netlink_broadcast() */
4679                if (err && err != -ESRCH)
4680                        goto failed;
4681        }
4682        return;
4683
4684nla_put_failure:
4685        nlmsg_free(skb);
4686failed:
4687        drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4688                 err, seq);
4689}
4690
4691void notify_connection_state(struct sk_buff *skb,
4692                             unsigned int seq,
4693                             struct drbd_connection *connection,
4694                             struct connection_info *connection_info,
4695                             enum drbd_notification_type type)
4696{
4697        struct connection_statistics connection_statistics;
4698        struct drbd_genlmsghdr *dh;
4699        bool multicast = false;
4700        int err;
4701
4702        if (!skb) {
4703                seq = atomic_inc_return(&notify_genl_seq);
4704                skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4705                err = -ENOMEM;
4706                if (!skb)
4707                        goto failed;
4708                multicast = true;
4709        }
4710
4711        err = -EMSGSIZE;
4712        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4713        if (!dh)
4714                goto nla_put_failure;
4715        dh->minor = -1U;
4716        dh->ret_code = NO_ERROR;
4717        if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4718            nla_put_notification_header(skb, type) ||
4719            ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4720             connection_info_to_skb(skb, connection_info, true)))
4721                goto nla_put_failure;
4722        connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4723        connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4724        genlmsg_end(skb, dh);
4725        if (multicast) {
4726                err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4727                /* skb has been consumed or freed in netlink_broadcast() */
4728                if (err && err != -ESRCH)
4729                        goto failed;
4730        }
4731        return;
4732
4733nla_put_failure:
4734        nlmsg_free(skb);
4735failed:
4736        drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4737                 err, seq);
4738}
4739
4740void notify_peer_device_state(struct sk_buff *skb,
4741                              unsigned int seq,
4742                              struct drbd_peer_device *peer_device,
4743                              struct peer_device_info *peer_device_info,
4744                              enum drbd_notification_type type)
4745{
4746        struct peer_device_statistics peer_device_statistics;
4747        struct drbd_resource *resource = peer_device->device->resource;
4748        struct drbd_genlmsghdr *dh;
4749        bool multicast = false;
4750        int err;
4751
4752        if (!skb) {
4753                seq = atomic_inc_return(&notify_genl_seq);
4754                skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4755                err = -ENOMEM;
4756                if (!skb)
4757                        goto failed;
4758                multicast = true;
4759        }
4760
4761        err = -EMSGSIZE;
4762        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4763        if (!dh)
4764                goto nla_put_failure;
4765        dh->minor = -1U;
4766        dh->ret_code = NO_ERROR;
4767        if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4768            nla_put_notification_header(skb, type) ||
4769            ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4770             peer_device_info_to_skb(skb, peer_device_info, true)))
4771                goto nla_put_failure;
4772        peer_device_to_statistics(&peer_device_statistics, peer_device);
4773        peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4774        genlmsg_end(skb, dh);
4775        if (multicast) {
4776                err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4777                /* skb has been consumed or freed in netlink_broadcast() */
4778                if (err && err != -ESRCH)
4779                        goto failed;
4780        }
4781        return;
4782
4783nla_put_failure:
4784        nlmsg_free(skb);
4785failed:
4786        drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4787                 err, seq);
4788}
4789
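    /* Broadcast that a user-space helper has been invoked (or has finished
     * with the given status) for this device or connection. */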
4790void notify_helper(enum drbd_notification_type type,
4791                   struct drbd_device *device, struct drbd_connection *connection,
4792                   const char *name, int status)
4793{
4794        struct drbd_resource *resource = device ? device->resource : connection->resource;
4795        struct drbd_helper_info helper_info;
4796        unsigned int seq = atomic_inc_return(&notify_genl_seq);
4797        struct sk_buff *skb = NULL;
4798        struct drbd_genlmsghdr *dh;
4799        int err;
4800
4801        strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
4802        helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4803        helper_info.helper_status = status;
4804
4805        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4806        err = -ENOMEM;
4807        if (!skb)
4808                goto fail;
4809
4810        err = -EMSGSIZE;
4811        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4812        if (!dh)
4813                goto fail;
4814        dh->minor = device ? device->minor : -1;
4815        dh->ret_code = NO_ERROR;
4816        mutex_lock(&notification_mutex);
4817        if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4818            nla_put_notification_header(skb, type) ||
4819            drbd_helper_info_to_skb(skb, &helper_info, true))
4820                goto unlock_fail;
4821        genlmsg_end(skb, dh);
4822        err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4823        skb = NULL;
4824        /* skb has been consumed or freed in netlink_broadcast() */
4825        if (err && err != -ESRCH)
4826                goto unlock_fail;
4827        mutex_unlock(&notification_mutex);
4828        return;
4829
4830unlock_fail:
4831        mutex_unlock(&notification_mutex);
4832fail:
4833        nlmsg_free(skb);
4834        drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4835                 err, seq);
4836}
4837
4838static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
4839{
4840        struct drbd_genlmsghdr *dh;
4841        int err;
4842
4843        err = -EMSGSIZE;
4844        dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4845        if (!dh)
4846                goto nla_put_failure;
4847        dh->minor = -1U;
4848        dh->ret_code = NO_ERROR;
4849        if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4850                goto nla_put_failure;
4851        genlmsg_end(skb, dh);
4852        return;
4853
4854nla_put_failure:
4855        nlmsg_free(skb);
4856        pr_err("Error %d sending event. Event seq:%u\n", err, seq);
4857}
4858
4859static void free_state_changes(struct list_head *list)
4860{
4861        while (!list_empty(list)) {
4862                struct drbd_state_change *state_change =
4863                        list_first_entry(list, struct drbd_state_change, list);
4864                list_del(&state_change->list);
4865                forget_state_change(state_change);
4866        }
4867}
4868
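    /* One notification for the resource itself, plus one per connection,
     * one per device, and one per peer device (device/connection pair). */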
4869static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4870{
4871        return 1 +
4872               state_change->n_connections +
4873               state_change->n_devices +
4874               state_change->n_devices * state_change->n_connections;
4875}
4876
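    /* netlink_callback argument layout used by the initial state dump,
     * set up in drbd_adm_get_initial_state() below:
     *   args[0]: current struct drbd_state_change
     *   args[2]: netlink sequence number of the request
     *   args[3]: number of notifications for the current state change
     *   args[4]: index of the next notification within that state change
     *   args[5]: remaining dump steps; when it reaches 1 the final
     *            "initial state done" message is sent
     */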
4877static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4878{
4879        struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4880        unsigned int seq = cb->args[2];
4881        unsigned int n;
4882        enum drbd_notification_type flags = 0;
4883
4884        /* There is no need for taking notification_mutex here: it doesn't
4885           matter if the initial state events mix with later state change
4886           events; we can always tell the events apart by the NOTIFY_EXISTS
4887           flag. */
4888
4889        cb->args[5]--;
4890        if (cb->args[5] == 1) {
4891                notify_initial_state_done(skb, seq);
4892                goto out;
4893        }
4894        n = cb->args[4]++;
4895        if (cb->args[4] < cb->args[3])
4896                flags |= NOTIFY_CONTINUES;
4897        if (n < 1) {
4898                notify_resource_state_change(skb, seq, state_change->resource,
4899                                             NOTIFY_EXISTS | flags);
4900                goto next;
4901        }
4902        n--;
4903        if (n < state_change->n_connections) {
4904                notify_connection_state_change(skb, seq, &state_change->connections[n],
4905                                               NOTIFY_EXISTS | flags);
4906                goto next;
4907        }
4908        n -= state_change->n_connections;
4909        if (n < state_change->n_devices) {
4910                notify_device_state_change(skb, seq, &state_change->devices[n],
4911                                           NOTIFY_EXISTS | flags);
4912                goto next;
4913        }
4914        n -= state_change->n_devices;
4915        if (n < state_change->n_devices * state_change->n_connections) {
4916                notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
4917                                                NOTIFY_EXISTS | flags);
4918                goto next;
4919        }
4920
4921next:
4922        if (cb->args[4] == cb->args[3]) {
4923                struct drbd_state_change *next_state_change =
4924                        list_entry(state_change->list.next,
4925                                   struct drbd_state_change, list);
4926                cb->args[0] = (long)next_state_change;
4927                cb->args[3] = notifications_for_state_change(next_state_change);
4928                cb->args[4] = 0;
4929        }
4930out:
4931        return skb->len;
4932}
4933
4934int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4935{
4936        struct drbd_resource *resource;
4937        LIST_HEAD(head);
4938
4939        if (cb->args[5] >= 1) {
4940                if (cb->args[5] > 1)
4941                        return get_initial_state(skb, cb);
4942                if (cb->args[0]) {
4943                        struct drbd_state_change *state_change =
4944                                (struct drbd_state_change *)cb->args[0];
4945
4946                        /* connect list to head */
4947                        list_add(&head, &state_change->list);
4948                        free_state_changes(&head);
4949                }
4950                return 0;
4951        }
4952
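            /* First call: snapshot all resources and count the dump steps;
             * the base of 2 covers the final "initial state done" message
             * and the terminating call that frees the snapshot. */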
4953        cb->args[5] = 2;  /* number of iterations */
4954        mutex_lock(&resources_mutex);
4955        for_each_resource(resource, &drbd_resources) {
4956                struct drbd_state_change *state_change;
4957
4958                state_change = remember_old_state(resource, GFP_KERNEL);
4959                if (!state_change) {
4960                        if (!list_empty(&head))
4961                                free_state_changes(&head);
4962                        mutex_unlock(&resources_mutex);
4963                        return -ENOMEM;
4964                }
4965                copy_old_to_new_state_change(state_change);
4966                list_add_tail(&state_change->list, &head);
4967                cb->args[5] += notifications_for_state_change(state_change);
4968        }
4969        mutex_unlock(&resources_mutex);
4970
4971        if (!list_empty(&head)) {
4972                struct drbd_state_change *state_change =
4973                        list_entry(head.next, struct drbd_state_change, list);
4974                cb->args[0] = (long)state_change;
4975                cb->args[3] = notifications_for_state_change(state_change);
4976                list_del(&head);  /* detach list from head */
4977        }
4978
4979        cb->args[2] = cb->nlh->nlmsg_seq;
4980        return get_initial_state(skb, cb);
4981}
4982