// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"
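/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */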
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
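/**
 * ice_ptype_to_htype - get a hash type from the decoded ptype
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */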
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}
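/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */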
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
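/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */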
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum errors in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
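/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and hardware timestamp
 * fields within the skb.
 */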
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}
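/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (via napi_gro_receive)
 */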
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
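/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 *
 * Returns ICE_XDP_TX on success and ICE_XDP_CONSUMED on failure.
 */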
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}
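/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and transmit it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success and ICE_XDP_CONSUMED on failure.
 */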
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}
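/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */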
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}