linux/net/smc/smc_cdc.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"

#define SMC_CDC_MSG_TYPE                0xFE

/* in network byte order */
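/* A CDC cursor describes a position in the circular RMB element (RMBE):
 * 'count' is the byte offset into the buffer, and 'wrap' is incremented
 * each time the cursor wraps around the buffer end.
 */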
union smc_cdc_cursor {          /* SMC cursor */
        struct {
                __be16  reserved;
                __be16  wrap;
                __be32  count;
        };
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t      acurs;          /* for atomic processing */
#else
        u64             acurs;          /* for atomic processing */
#endif
} __aligned(8);

/* in network byte order */
struct smc_cdc_msg {
        struct smc_wr_rx_hdr            common; /* .type = 0xFE */
        u8                              len;    /* 44 */
        __be16                          seqno;
        __be32                          token;
        union smc_cdc_cursor            prod;
        union smc_cdc_cursor            cons;   /* piggy backed "ack" */
        struct smc_cdc_producer_flags   prod_flags;
        struct smc_cdc_conn_state_flags conn_state_flags;
        u8                              reserved[18];
};

/* SMC-D cursor format */
union smcd_cdc_cursor {
        struct {
                u16     wrap;
                u32     count;
                struct smc_cdc_producer_flags   prod_flags;
                struct smc_cdc_conn_state_flags conn_state_flags;
        } __packed;
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t              acurs;          /* for atomic processing */
#else
        u64                     acurs;          /* for atomic processing */
#endif
} __aligned(8);

/* CDC message for SMC-D */
struct smcd_cdc_msg {
        struct smc_wr_rx_hdr common;    /* Type = 0xFE */
        u8 res1[7];
        union smcd_cdc_cursor   prod;
        union smcd_cdc_cursor   cons;
        u8 res3[8];
} __aligned(8);

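/* Check peer connection-state flags received via CDC (conn->local_rx_ctrl) */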
static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
        return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
               conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
}

static inline bool smc_cdc_rxed_any_close_or_senddone(
        struct smc_connection *conn)
{
        return smc_cdc_rxed_any_close(conn) ||
               conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
}

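/* Advance a cursor by 'value' bytes within a circular buffer of 'size' bytes;
 * assumes value <= size, so at most one wrap-around can occur.
 * Example (hypothetical values): size = 8, count = 6, value = 5 gives
 * wrap + 1 and count = 3.
 */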
static inline void smc_curs_add(int size, union smc_host_cursor *curs,
                                int value)
{
        curs->count += value;
        if (curs->count >= size) {
                curs->wrap++;
                curs->count -= size;
        }
}

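/* The copy helpers below transfer a cursor as one 8-byte unit. With 64-bit
 * atomics (KERNEL_HAS_ATOMIC64) this is done via atomic64_{read,set} on the
 * 'acurs' overlay; otherwise the connection's acurs_lock spinlock serializes
 * the copy.
 */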
/* Copy cursor src into tgt */
static inline void smc_curs_copy(union smc_host_cursor *tgt,
                                 union smc_host_cursor *src,
                                 struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        tgt->acurs = src->acurs;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
        atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

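/* Copy cursor src into tgt, CDC wire format (network byte order) */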
static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
                                     union smc_cdc_cursor *src,
                                     struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        tgt->acurs = src->acurs;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
        atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

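/* Copy cursor src into tgt, SMC-D cursor format */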
static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
                                  union smcd_cdc_cursor *src,
                                  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
        unsigned long flags;

        spin_lock_irqsave(&conn->acurs_lock, flags);
        tgt->acurs = src->acurs;
        spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
        atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

/* calculate cursor difference between old and new, where old <= new and
 * difference cannot exceed size
 */
static inline int smc_curs_diff(unsigned int size,
                                union smc_host_cursor *old,
                                union smc_host_cursor *new)
{
        if (old->wrap != new->wrap)
                return max_t(int, 0,
                             ((size - old->count) + new->count));

        return max_t(int, 0, (new->count - old->count));
}
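/* Example (hypothetical values): size = 65536, old = {wrap 1, count 60000},
 * new = {wrap 2, count 1000}: diff = (65536 - 60000) + 1000 = 6536 bytes.
 */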

/* calculate cursor difference between old and new - returns negative
 * value in case old > new
 */
static inline int smc_curs_comp(unsigned int size,
                                union smc_host_cursor *old,
                                union smc_host_cursor *new)
{
        if (old->wrap > new->wrap ||
            (old->wrap == new->wrap && old->count > new->count))
                return -smc_curs_diff(size, new, old);
        return smc_curs_diff(size, old, new);
}

/* calculate cursor difference between old and new, where old <= new and
 * difference may exceed size
 */
static inline int smc_curs_diff_large(unsigned int size,
                                      union smc_host_cursor *old,
                                      union smc_host_cursor *new)
{
        if (old->wrap < new->wrap)
                return min_t(int,
                             (size - old->count) + new->count +
                             (new->wrap - old->wrap - 1) * size,
                             size);

        if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
                return min_t(int,
                             (size - old->count) + new->count +
                             (new->wrap + 0xffff - old->wrap) * size,
                             size);

        return max_t(int, 0, (new->count - old->count));
}
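/* Example (hypothetical values): size = 16384, old = {wrap 3, count 16000},
 * new = {wrap 5, count 100}: (16384 - 16000) + 100 + 1 * 16384 = 16868,
 * capped at size = 16384.
 */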

static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
                                          union smc_host_cursor *local,
                                          union smc_host_cursor *save,
                                          struct smc_connection *conn)
{
        smc_curs_copy(save, local, conn);
        peer->count = htonl(save->count);
        peer->wrap = htons(save->wrap);
        /* peer->reserved = htons(0); must be ensured by caller */
}

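/* Convert the host byte-order CDC control data of a connection into the
 * SMC-R wire format for sending to the peer.
 */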
static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
                                       struct smc_connection *conn,
                                       union smc_host_cursor *save)
{
        struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;

        peer->common.type = local->common.type;
        peer->len = local->len;
        peer->seqno = htons(local->seqno);
        peer->token = htonl(local->token);
        smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
        smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
        peer->prod_flags = local->prod_flags;
        peer->conn_state_flags = local->conn_state_flags;
}

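/* Update the local host cursor from a received CDC cursor; values that would
 * move the cursor backwards (e.g. from an out-of-date message) are ignored.
 */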
static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
                                          union smc_cdc_cursor *peer,
                                          struct smc_connection *conn)
{
        union smc_host_cursor temp, old;
        union smc_cdc_cursor net;

        smc_curs_copy(&old, local, conn);
        smc_curs_copy_net(&net, peer, conn);
        temp.count = ntohl(net.count);
        temp.wrap = ntohs(net.wrap);
        if ((old.wrap > temp.wrap) && temp.wrap)
                return;
        if ((old.wrap == temp.wrap) &&
            (old.count > temp.count))
                return;
        smc_curs_copy(local, &temp, conn);
}

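/* Import a received CDC message into the host byte-order representation
 * (struct smc_host_cdc_msg): smcr_cdc_msg_to_host() handles the SMC-R wire
 * format, smcd_cdc_msg_to_host() the SMC-D format, and smc_cdc_msg_to_host()
 * dispatches based on the link group type.
 */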
static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                        struct smc_cdc_msg *peer,
                                        struct smc_connection *conn)
{
        local->common.type = peer->common.type;
        local->len = peer->len;
        local->seqno = ntohs(peer->seqno);
        local->token = ntohl(peer->token);
        smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
        smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
        local->prod_flags = peer->prod_flags;
        local->conn_state_flags = peer->conn_state_flags;
}

static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                        struct smcd_cdc_msg *peer,
                                        struct smc_connection *conn)
{
        union smc_host_cursor temp;

        temp.wrap = peer->prod.wrap;
        temp.count = peer->prod.count;
        smc_curs_copy(&local->prod, &temp, conn);

        temp.wrap = peer->cons.wrap;
        temp.count = peer->cons.count;
        smc_curs_copy(&local->cons, &temp, conn);
        local->prod_flags = peer->cons.prod_flags;
        local->conn_state_flags = peer->cons.conn_state_flags;
}

static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                       struct smc_cdc_msg *peer,
                                       struct smc_connection *conn)
{
        if (conn->lgr->is_smcd)
                smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer, conn);
        else
                smcr_cdc_msg_to_host(local, peer, conn);
}

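/* Context kept for each pending CDC transmit work request */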
struct smc_cdc_tx_pend {
        struct smc_connection   *conn;          /* socket connection */
        union smc_host_cursor   cursor;         /* tx sndbuf cursor sent */
        union smc_host_cursor   p_cursor;       /* rx RMBE cursor produced */
        u16                     ctrl_seq;       /* conn. tx sequence # */
};

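/* CDC transmit/receive entry points, implemented in smc_cdc.c */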
int smc_cdc_get_free_slot(struct smc_connection *conn,
                          struct smc_link *link,
                          struct smc_wr_buf **wr_buf,
                          struct smc_rdma_wr **wr_rdma_buf,
                          struct smc_cdc_tx_pend **pend);
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
int smcd_cdc_msg_send(struct smc_connection *conn);
int smcr_cdc_msg_send_validation(struct smc_connection *conn,
                                 struct smc_cdc_tx_pend *pend,
                                 struct smc_wr_buf *wr_buf);
int smc_cdc_init(void) __init;
void smcd_cdc_rx_init(struct smc_connection *conn);

#endif /* SMC_CDC_H */