/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

/* GVE DQO Descriptor formats */

#ifndef _GVE_DESC_DQO_H_
#define _GVE_DESC_DQO_H_

#include <linux/build_bug.h>

/* Max TX packet-header length; 255 is the largest value that fits the u8
 * header_len field of struct gve_tx_tso_context_desc_dqo below.
 */
#define GVE_TX_MAX_HDR_SIZE_DQO 255
/* Minimum MSS accepted for TSO packets. */
#define GVE_TX_MIN_TSO_MSS_DQO 88

/* The bitfield layouts in this file are written for little-endian bit
 * ordering only; refuse to build otherwise.
 */
#ifndef __LITTLE_ENDIAN_BITFIELD
#error "Only little endian supported"
#endif

/* Basic TX descriptor (DTYPE 0x0C) */
struct gve_tx_pkt_desc_dqo {
	/* DMA address of the buffer described by this descriptor. */
	__le64 buf_addr;

	/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
	u8 dtype: 5;

	/* Denotes the last descriptor of a packet. */
	u8 end_of_packet: 1;
	u8 checksum_offload_enable: 1;

	/* If set, will generate a descriptor completion for this descriptor. */
	u8 report_event: 1;
	u8 reserved0;
	__le16 reserved1;

	/* The TX completion associated with this packet will contain this tag.
	 */
	__le16 compl_tag;
	/* Buffer length; 14 bits, so at most GVE_TX_MAX_BUF_SIZE_DQO. */
	u16 buf_size: 14;
	u16 reserved2: 2;
} __packed;
static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16);

#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
/* Largest value representable by the 14-bit buf_size field above. */
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)

/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
#define GVE_TX_MAX_DATA_DESCS 10

/* Min gap between tail and head to avoid cacheline overlap */
#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4

/* "report_event" on TX packet descriptors may only be reported on the last
 * descriptor of a TX packet, and they must be spaced apart with at least this
 * value.
 */
#define GVE_TX_MIN_RE_INTERVAL 32

/* Common command/dtype pair embedded in the TX context descriptors below. */
struct gve_tx_context_cmd_dtype {
	/* Descriptor type code (e.g. GVE_TX_TSO_CTX_DESC_DTYPE_DQO). */
	u8 dtype: 5;
	/* Set when the descriptor carries TSO context. */
	u8 tso: 1;
	u8 reserved1: 2;

	u8 reserved2;
};

static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2);

/* TX Native TSO Context DTYPE (0x05)
 *
 * "flex" fields allow the driver to send additional packet context to HW.
 */
struct gve_tx_tso_context_desc_dqo {
	/* The L4 payload bytes that should be segmented. */
	u32 tso_total_len: 24;
	u32 flex10: 8;

	/* Max segment size in TSO excluding headers. */
	u16 mss: 14;
	u16 reserved: 2;

	u8 header_len; /* Header length to use for TSO offload */
	u8 flex11;
	/* cmd_dtype.dtype must be GVE_TX_TSO_CTX_DESC_DTYPE_DQO. */
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u8 flex0;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
} __packed;
static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16);

#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5

/* General context descriptor for sending metadata. */
struct gve_tx_general_context_desc_dqo {
	u8 flex4;
	u8 flex5;
	u8 flex6;
	u8 flex7;
	u8 flex8;
	u8 flex9;
	u8 flex10;
	u8 flex11;
	/* cmd_dtype.dtype must be GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO. */
	struct gve_tx_context_cmd_dtype cmd_dtype;
	u16 reserved;
	u8 flex0;
	u8 flex1;
	u8 flex2;
	u8 flex3;
} __packed;
static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16);

#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4

/* Logical structure of metadata which is packed into context descriptor flex
 * fields.
 */
struct gve_tx_metadata_dqo {
	union {
		struct {
			/* Metadata layout version; see
			 * GVE_TX_METADATA_VERSION_DQO.
			 */
			u8 version;

			/* If `skb->l4_hash` is set, this value should be
			 * derived from `skb->hash`.
			 *
			 * A zero value means no l4_hash was associated with the
			 * skb.
			 */
			u16 path_hash: 15;

			/* Should be set to 1 if the flow associated with the
			 * skb had a rehash from the TCP stack.
			 */
			u16 rehash_event: 1;
		}  __packed;
		/* Raw byte view of the 12-byte metadata blob. */
		u8 bytes[12];
	};
}  __packed;
static_assert(sizeof(struct gve_tx_metadata_dqo) == 12);

#define GVE_TX_METADATA_VERSION_DQO 0

/* TX completion descriptor */
struct gve_tx_compl_desc {
	/* For types 0-4 this is the TX queue ID associated with this
	 * completion.
	 */
	u16 id: 11;

	/* See: GVE_COMPL_TYPE_DQO* */
	u16 type: 3;
	u16 reserved0: 1;

	/* Flipped by HW to notify the descriptor is populated. */
	u16 generation: 1;
	union {
		/* For descriptor completions, this is the last index fetched
		 * by HW + 1.
		 */
		__le16 tx_head;

		/* For packet completions, this is the completion tag set on the
		 * TX packet descriptors.
		 */
		__le16 completion_tag;
	};
	__le32 reserved1;
} __packed;
static_assert(sizeof(struct gve_tx_compl_desc) == 8);

/* Values for the "type" field above. */
#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */

/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
	__le16 buf_id; /* ID returned in Rx completion descriptor */
	__le16 reserved0;
	__le32 reserved1;
	__le64 buf_addr; /* DMA address of the buffer */
	/* DMA address of the header buffer (used for header-split). */
	__le64 header_buf_addr;
	__le64 reserved2;
} __packed;
static_assert(sizeof(struct gve_rx_desc_dqo) == 32);

/* Descriptor for HW to notify SW of new packets received on RX queue. */
struct gve_rx_compl_desc_dqo {
	/* Must be 1 */
	u8 rxdid: 4;
	u8 reserved0: 4;

	/* Packet originated from this system rather than the network. */
	u8 loopback: 1;
	/* Set when IPv6 packet contains a destination options header or routing
	 * header.
	 */
	u8 ipv6_ex_add: 1;
	/* Invalid packet was received. */
	u8 rx_error: 1;
	u8 reserved1: 5;

	u16 packet_type: 10;
	u16 ip_hdr_err: 1;
	u16 udp_len_err: 1;
	u16 raw_cs_invalid: 1;
	u16 reserved2: 3;

	u16 packet_len: 14;
	/* Flipped by HW to notify the descriptor is populated. */
	u16 generation: 1;
	/* Should be zero. */
	u16 buffer_queue_id: 1;

	u16 header_len: 10;
	u16 rsc: 1;
	u16 split_header: 1;
	u16 reserved3: 4;

	/* Status/error bits reported by HW for this packet. */
	u8 descriptor_done: 1;
	u8 end_of_packet: 1;
	u8 header_buffer_overflow: 1;
	u8 l3_l4_processed: 1;
	u8 csum_ip_err: 1;
	u8 csum_l4_err: 1;
	u8 csum_external_ip_err: 1;
	u8 csum_external_udp_err: 1;

	u8 status_error1;

	__le16 reserved5;
	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */

	union {
		/* Packet checksum. */
		__le16 raw_cs;
		/* Segment length for RSC packets. */
		__le16 rsc_seg_len;
	};
	__le32 hash;
	__le32 reserved6;
	__le64 reserved7;
} __packed;

static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);

/* Ringing the doorbell too often can hurt performance.
 *
 * HW requires this value to be at least 8.
 */
#define GVE_RX_BUF_THRESH_DQO 32

#endif /* _GVE_DESC_DQO_H_ */