linux/fs/ksmbd/connection.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
DEFINE_RWLOCK(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn:       connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
        write_lock(&conn_list_lock);
        list_del(&conn->conns_list);
        write_unlock(&conn_list_lock);

        kvfree(conn->request_buf);
        kfree(conn->preauth_info);
        kfree(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return:      ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
        struct ksmbd_conn *conn;

        conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
        if (!conn)
                return NULL;

        conn->need_neg = true;
        conn->status = KSMBD_SESS_NEW;
        conn->local_nls = load_nls("utf8");
        if (!conn->local_nls)
                conn->local_nls = load_nls_default();
        atomic_set(&conn->req_running, 0);
        atomic_set(&conn->r_count, 0);
        conn->total_credits = 1;
        conn->outstanding_credits = 1;

        init_waitqueue_head(&conn->req_running_q);
        INIT_LIST_HEAD(&conn->conns_list);
        INIT_LIST_HEAD(&conn->sessions);
        INIT_LIST_HEAD(&conn->requests);
        INIT_LIST_HEAD(&conn->async_requests);
        spin_lock_init(&conn->request_lock);
        spin_lock_init(&conn->credits_lock);
        ida_init(&conn->async_ida);

        spin_lock_init(&conn->llist_lock);
        INIT_LIST_HEAD(&conn->lock_list);

        write_lock(&conn_list_lock);
        list_add(&conn->conns_list, &conn_list);
        write_unlock(&conn_list_lock);
        return conn;
}

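/**
 * ksmbd_conn_lookup_dialect() - look up a connection with a matching ClientGUID
 * @c:          connection instance to compare against
 *
 * Return:      true if another connection on conn_list carries the same
 *              ClientGUID, otherwise false
 */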
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
        struct ksmbd_conn *t;
        bool ret = false;

        read_lock(&conn_list_lock);
        list_for_each_entry(t, &conn_list, conns_list) {
                if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
                        continue;

                ret = true;
                break;
        }
        read_unlock(&conn_list_lock);
        return ret;
}

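/**
 * ksmbd_conn_enqueue_request() - add a work item to the connection's request list
 * @work:       smb work containing the request
 *
 * Every command except SMB2 CANCEL is appended to conn->requests and
 * accounted in conn->req_running.
 */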
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
        struct ksmbd_conn *conn = work->conn;
        struct list_head *requests_queue = NULL;

        if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
                requests_queue = &conn->requests;
                work->syncronous = true;
        }

        if (requests_queue) {
                atomic_inc(&conn->req_running);
                spin_lock(&conn->request_lock);
                list_add_tail(&work->request_entry, requests_queue);
                spin_unlock(&conn->request_lock);
        }
}

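/**
 * ksmbd_conn_try_dequeue_request() - remove a work item from the request lists
 * @work:       smb work to be dequeued
 *
 * Return:      0 if the work was dequeued (or was never queued), 1 if it stays
 *              queued because a multi-part response is still pending
 */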
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
        struct ksmbd_conn *conn = work->conn;
        int ret = 1;

        if (list_empty(&work->request_entry) &&
            list_empty(&work->async_request_entry))
                return 0;

        if (!work->multiRsp)
                atomic_dec(&conn->req_running);
        spin_lock(&conn->request_lock);
        if (!work->multiRsp) {
                list_del_init(&work->request_entry);
                if (work->syncronous == false)
                        list_del_init(&work->async_request_entry);
                ret = 0;
        }
        spin_unlock(&conn->request_lock);

        wake_up_all(&conn->req_running_q);
        return ret;
}

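/* Serialize sends on this connection via conn->srv_mutex. */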
static void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
        mutex_lock(&conn->srv_mutex);
}

static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
        mutex_unlock(&conn->srv_mutex);
}

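/* Wait until no request other than the caller's own is still running. */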
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
        wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

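/**
 * ksmbd_conn_write() - send a response PDU over the connection's transport
 * @work:       smb work containing the response buffer(s)
 *
 * Builds an iovec from the optional transform header, the response header
 * and any auxiliary payload, then hands it to the transport's writev op.
 *
 * Return:      0 on success, otherwise a negative error code
 */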
int ksmbd_conn_write(struct ksmbd_work *work)
{
        struct ksmbd_conn *conn = work->conn;
        size_t len = 0;
        int sent;
        struct kvec iov[3];
        int iov_idx = 0;

        ksmbd_conn_try_dequeue_request(work);
        if (!work->response_buf) {
                pr_err("NULL response header\n");
                return -EINVAL;
        }

        if (work->tr_buf) {
                iov[iov_idx] = (struct kvec) { work->tr_buf,
                                sizeof(struct smb2_transform_hdr) + 4 };
                len += iov[iov_idx++].iov_len;
        }

        if (work->aux_payload_sz) {
                iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
                len += iov[iov_idx++].iov_len;
                iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
                len += iov[iov_idx++].iov_len;
        } else {
                if (work->tr_buf)
                        iov[iov_idx].iov_len = work->resp_hdr_sz;
                else
                        iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
                iov[iov_idx].iov_base = work->response_buf;
                len += iov[iov_idx++].iov_len;
        }

        ksmbd_conn_lock(conn);
        sent = conn->transport->ops->writev(conn->transport, &iov[0],
                                        iov_idx, len,
                                        work->need_invalidate_rkey,
                                        work->remote_key);
        ksmbd_conn_unlock(conn);

        if (sent < 0) {
                pr_err("Failed to send message: %d\n", sent);
                return sent;
        }

        return 0;
}

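/* Perform an RDMA read via the transport, if it provides an rdma_read op. */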
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
                         unsigned int buflen, u32 remote_key, u64 remote_offset,
                         u32 remote_len)
{
        int ret = -EINVAL;

        if (conn->transport->ops->rdma_read)
                ret = conn->transport->ops->rdma_read(conn->transport,
                                                      buf, buflen,
                                                      remote_key, remote_offset,
                                                      remote_len);
        return ret;
}

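/* Perform an RDMA write via the transport, if it provides an rdma_write op. */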
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
                          unsigned int buflen, u32 remote_key,
                          u64 remote_offset, u32 remote_len)
{
        int ret = -EINVAL;

        if (conn->transport->ops->rdma_write)
                ret = conn->transport->ops->rdma_write(conn->transport,
                                                       buf, buflen,
                                                       remote_key, remote_offset,
                                                       remote_len);
        return ret;
}

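/**
 * ksmbd_conn_alive() - check whether the connection should keep running
 * @conn:       connection instance to check
 *
 * Return:      false if the server or the session is shutting down, or if
 *              the client has been idle past the configured deadtime while
 *              holding no open files; true otherwise
 */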
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
        if (!ksmbd_server_running())
                return false;

        if (conn->status == KSMBD_SESS_EXITING)
                return false;

        if (kthread_should_stop())
                return false;

        if (atomic_read(&conn->stats.open_files_count) > 0)
                return true;

        /*
         * Stop the current session if the time since the last request from
         * the client exceeds the user-configured deadtime and no files are
         * open.
         */
        if (server_conf.deadtime > 0 &&
            time_after(jiffies, conn->last_active + server_conf.deadtime)) {
                ksmbd_debug(CONN, "No response from client in %lu minutes\n",
                            server_conf.deadtime / SMB_ECHO_INTERVAL);
                return false;
        }
        return true;
}

/**
 * ksmbd_conn_handler_loop() - session thread that listens for new SMB requests
 * @p:          connection instance
 *
 * One thread per connection.
 *
 * Return:      0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
        struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
        struct ksmbd_transport *t = conn->transport;
        unsigned int pdu_size;
        char hdr_buf[4] = {0,};
        int size;

        mutex_init(&conn->srv_mutex);
        __module_get(THIS_MODULE);

        if (t->ops->prepare && t->ops->prepare(t))
                goto out;

        conn->last_active = jiffies;
        while (ksmbd_conn_alive(conn)) {
                if (try_to_freeze())
                        continue;

                kvfree(conn->request_buf);
                conn->request_buf = NULL;

                size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
                if (size != sizeof(hdr_buf))
                        break;

                pdu_size = get_rfc1002_len(hdr_buf);
                ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

                /*
                 * Check if pdu size is valid (min : smb header size,
                 * max : 0x00FFFFFF).
                 */
                if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
                    pdu_size > MAX_STREAM_PROT_LEN) {
                        continue;
                }

                /* 4 for rfc1002 length field */
                size = pdu_size + 4;
                conn->request_buf = kvmalloc(size, GFP_KERNEL);
                if (!conn->request_buf)
                        continue;

                memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
                if (!ksmbd_smb_request(conn))
                        break;

                /*
                 * We already read 4 bytes to find out PDU size, now
                 * read in PDU
                 */
                size = t->ops->read(t, conn->request_buf + 4, pdu_size);
                if (size < 0) {
                        pr_err("sock_read failed: %d\n", size);
                        break;
                }

                if (size != pdu_size) {
                        pr_err("PDU error. Read: %d, Expected: %d\n",
                               size, pdu_size);
                        continue;
                }

                if (!default_conn_ops.process_fn) {
                        pr_err("No connection request callback\n");
                        break;
                }

                if (default_conn_ops.process_fn(conn)) {
                        pr_err("Cannot handle request\n");
                        break;
                }
        }

out:
        /* Wait till all references to the Server object are dropped */
        while (atomic_read(&conn->r_count) > 0)
                schedule_timeout(HZ);

        unload_nls(conn->local_nls);
        if (default_conn_ops.terminate_fn)
                default_conn_ops.terminate_fn(conn);
        t->ops->disconnect(t);
        module_put(THIS_MODULE);
        return 0;
}

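/* Register the server's request-processing and termination callbacks. */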
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
        default_conn_ops.process_fn = ops->process_fn;
        default_conn_ops.terminate_fn = ops->terminate_fn;
}

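/* Bring up the TCP and RDMA transport layers under init_lock. */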
int ksmbd_conn_transport_init(void)
{
        int ret;

        mutex_lock(&init_lock);
        ret = ksmbd_tcp_init();
        if (ret) {
                pr_err("Failed to init TCP subsystem: %d\n", ret);
                goto out;
        }

        ret = ksmbd_rdma_init();
        if (ret) {
                pr_err("Failed to init RDMA subsystem: %d\n", ret);
                goto out;
        }
out:
        mutex_unlock(&init_lock);
        return ret;
}

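/*
 * Mark every connection as exiting, shut down its transport, and poll until
 * conn_list is empty.
 */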
static void stop_sessions(void)
{
        struct ksmbd_conn *conn;
        struct ksmbd_transport *t;

again:
        read_lock(&conn_list_lock);
        list_for_each_entry(conn, &conn_list, conns_list) {
                struct task_struct *task;

                t = conn->transport;
                task = t->handler;
                if (task)
                        ksmbd_debug(CONN, "Stop session handler %s/%d\n",
                                    task->comm, task_pid_nr(task));
                conn->status = KSMBD_SESS_EXITING;
                if (t->ops->shutdown) {
                        read_unlock(&conn_list_lock);
                        t->ops->shutdown(t);
                        read_lock(&conn_list_lock);
                }
        }
        read_unlock(&conn_list_lock);

        if (!list_empty(&conn_list)) {
                schedule_timeout_interruptible(HZ / 10); /* 100ms */
                goto again;
        }
}

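/* Destroy the transport layers and stop all remaining sessions. */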
void ksmbd_conn_transport_destroy(void)
{
        mutex_lock(&init_lock);
        ksmbd_tcp_destroy();
        ksmbd_rdma_destroy();
        stop_sessions();
        mutex_unlock(&init_lock);
}