// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up
	 * to the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * Sets skb->ip_summed and skb->csum_level based on the descriptor
 * status bits and the decoded packet type.
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, protocol, and checksum fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (napi_gro_receive).
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_xmit_xdp_ring - submit a single packet to the XDP Tx ring
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED if the ring is full or
 * the DMA mapping fails.
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and transmit it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED on failure.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}