linux/drivers/infiniband/hw/hfi1/ipoib.h

/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB functionality
 */

#ifndef HFI1_IPOIB_H
#define HFI1_IPOIB_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/if_infiniband.h>

#include "hfi.h"
#include "iowait.h"
#include "netdev.h"

#include <rdma/ib_verbs.h>

#define HFI1_IPOIB_ENTROPY_SHIFT   24

#define HFI1_IPOIB_TXREQ_NAME_LEN   32

#define HFI1_IPOIB_PSEUDO_LEN 20
#define HFI1_IPOIB_ENCAP_LEN 4

struct hfi1_ipoib_dev_priv;

union hfi1_ipoib_flow {
        u16 as_int;
        struct {
                u8 tx_queue;
                u8 sc5;
        } __attribute__((__packed__));
};
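
/*
 * Illustrative sketch (not taken verbatim from the driver): packing
 * tx_queue and sc5 into as_int lets a sender detect a flow change for a
 * queue with a single 16-bit comparison, along the lines of:
 *
 *      union hfi1_ipoib_flow flow = { .tx_queue = q_idx, .sc5 = sc5 };
 *
 *      if (flow.as_int != txq->flow.as_int)
 *              (flush txq->tx_list before switching to the new flow)
 *
 * q_idx, sc5 and txq stand in for the caller's own state here.
 */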

/**
 * struct hfi1_ipoib_circ_buf - List of items to be processed
 * @items: ring of items
 * @head: ring head
 * @tail: ring tail
 * @max_items: number of ring slots; one slot is kept free, so the ring
 *             holds at most @max_items - 1 entries
 * @producer_lock: producer sync lock
 * @consumer_lock: consumer sync lock
 */
struct hfi1_ipoib_circ_buf {
        void **items;
        unsigned long head;
        unsigned long tail;
        unsigned long max_items;
        spinlock_t producer_lock; /* head sync lock */
        spinlock_t consumer_lock; /* tail sync lock */
};
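
/*
 * The ring above is a producer/consumer circular buffer: producers advance
 * @head under @producer_lock, the consumer (the tx napi callback that reaps
 * completed txreqs) advances @tail under @consumer_lock, and one slot is
 * left empty so head == tail unambiguously means "empty".  One plausible
 * way to compute occupancy, assuming @max_items is a power of two, is the
 * helpers from <linux/circ_buf.h>:
 *
 *      used  = CIRC_CNT(ring->head, ring->tail, ring->max_items);
 *      space = CIRC_SPACE(ring->head, ring->tail, ring->max_items);
 */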

/**
 * struct hfi1_ipoib_txq - IPOIB per Tx queue information
 * @priv: private pointer
 * @sde: sdma engine
 * @tx_list: tx request list
 * @sent_txreqs: count of txreqs posted to sdma
 * @stops: count of stops of queue
 * @ring_full: ring has been filled
 * @no_desc: descriptor shortage seen
 * @flow: tracks when list needs to be flushed for a flow change
 * @q_idx: ipoib Tx queue index
 * @pkts_sent: indicator packets have been sent from this queue
 * @wait: iowait structure
 * @complete_txreqs: count of txreqs completed by sdma
 * @napi: pointer to tx napi interface
 * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
 */
struct hfi1_ipoib_txq {
        struct hfi1_ipoib_dev_priv *priv;
        struct sdma_engine *sde;
        struct list_head tx_list;
        u64 sent_txreqs;
        atomic_t stops;
        atomic_t ring_full;
        atomic_t no_desc;
        union hfi1_ipoib_flow flow;
        u8 q_idx;
        bool pkts_sent;
        struct iowait wait;

        atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
        struct napi_struct *napi;
        struct hfi1_ipoib_circ_buf tx_ring;
};
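
/*
 * Note on the stop/wake accounting (a sketch of the presumed pattern, not a
 * contract defined by this header): @ring_full and @no_desc each record one
 * independent reason for throttling, while @stops counts how many reasons
 * are currently outstanding, so the subqueue is stopped on the 0 -> 1
 * transition and woken again only once every reason has cleared:
 *
 *      if (atomic_inc_return(&txq->stops) == 1)
 *              netif_stop_subqueue(dev, txq->q_idx);
 *      ...
 *      if (atomic_dec_and_test(&txq->stops))
 *              netif_wake_subqueue(dev, txq->q_idx);
 */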

struct hfi1_ipoib_dev_priv {
        struct hfi1_devdata *dd;
        struct net_device   *netdev;
        struct ib_device    *device;
        struct hfi1_ipoib_txq *txqs;
        struct kmem_cache *txreq_cache;
        struct napi_struct *tx_napis;
        u16 pkey;
        u16 pkey_index;
        u32 qkey;
        u8 port_num;

        const struct net_device_ops *netdev_ops;
        struct rvt_qp *qp;
        struct pcpu_sw_netstats __percpu *netstats;
};

/* hfi1 ipoib rdma netdev's private data structure */
struct hfi1_ipoib_rdma_netdev {
        struct rdma_netdev rn;  /* keep this first */
        /* followed by device private data */
        struct hfi1_ipoib_dev_priv dev_priv;
};

static inline struct hfi1_ipoib_dev_priv *
hfi1_ipoib_priv(const struct net_device *dev)
{
        return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
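
/*
 * Layout note: netdev_priv(dev) points at struct hfi1_ipoib_rdma_netdev, so
 * keeping @rn first lets the IPoIB core treat the private area as a plain
 * struct rdma_netdev, while this helper recovers the hfi1-specific part
 * that follows it.  A typical (assumed) caller:
 *
 *      struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
 *
 * after which priv->dd, priv->txqs, priv->netstats, ... are available to
 * the hfi1 datapath.
 */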

static inline void
hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
                              u64 packets,
                              u64 bytes)
{
        struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);

        u64_stats_update_begin(&netstats->syncp);
        netstats->rx_packets += packets;
        netstats->rx_bytes += bytes;
        u64_stats_update_end(&netstats->syncp);
}

static inline void
hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
                              u64 packets,
                              u64 bytes)
{
        struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);

        u64_stats_update_begin(&netstats->syncp);
        netstats->tx_packets += packets;
        netstats->tx_bytes += bytes;
        u64_stats_update_end(&netstats->syncp);
}
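
/*
 * Expected (assumed) datapath usage: account one packet at a time from the
 * rx and tx completion paths, e.g.
 *
 *      hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
 *
 * The u64_stats_update_begin()/end() pair keeps the 64-bit counters
 * consistent for readers on 32-bit kernels; since this_cpu_ptr() is used,
 * callers are expected to run with preemption disabled (e.g. in
 * napi/softirq context) so the update stays on one CPU.
 */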
 154
 155int hfi1_ipoib_send_dma(struct net_device *dev,
 156                        struct sk_buff *skb,
 157                        struct ib_ah *address,
 158                        u32 dqpn);
 159
 160int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
 161void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
 162
 163int hfi1_ipoib_rxq_init(struct net_device *dev);
 164void hfi1_ipoib_rxq_deinit(struct net_device *dev);
 165
 166void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
 167void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
 168
 169struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
 170                                       int size, void *data);
 171
 172int hfi1_ipoib_rn_get_params(struct ib_device *device,
 173                             u8 port_num,
 174                             enum rdma_netdev_t type,
 175                             struct rdma_netdev_alloc_params *params);
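
/*
 * hfi1_ipoib_rn_get_params() is presumably wired up as the device's
 * ib_device_ops::rdma_netdev_get_params callback, which the IPoIB ULP uses
 * to size and allocate the accelerated rdma_netdev (see struct
 * rdma_netdev_alloc_params in <rdma/ib_verbs.h>).
 */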

#endif /* HFI1_IPOIB_H */