/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18#include <linux/inetdevice.h>
19#include <linux/mbus.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/cpumask.h>
23#include <linux/of.h>
24#include <linux/of_irq.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/of_address.h>
28#include <linux/phy.h>
29#include <linux/clk.h>
30#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h>
32#include <net/ipv6.h>

/* RX Fifo Registers */
35#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
36#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
37#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
38#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
41#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
42#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
43#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
44#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
45#define MVPP2_POOL_BUF_SIZE_OFFSET 5
46#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
47#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
48#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
49#define MVPP2_RXQ_POOL_SHORT_OFFS 20
50#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
51#define MVPP2_RXQ_POOL_LONG_OFFS 24
52#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
53#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
54#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
55#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
58#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
59#define MVPP2_PRS_PORT_LU_MAX 0xf
60#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
61#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
62#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
63#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
64#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
65#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
66#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
67#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
68#define MVPP2_PRS_TCAM_IDX_REG 0x1100
69#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
70#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
71#define MVPP2_PRS_SRAM_IDX_REG 0x1200
72#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
73#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
74#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
77#define MVPP2_CLS_MODE_REG 0x1800
78#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
79#define MVPP2_CLS_PORT_WAY_REG 0x1810
80#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
81#define MVPP2_CLS_LKP_INDEX_REG 0x1814
82#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
83#define MVPP2_CLS_LKP_TBL_REG 0x1818
84#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
85#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
86#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
87#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
88#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
89#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
90#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
91#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
92#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
93#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
94#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
95#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
98#define MVPP2_RXQ_NUM_REG 0x2040
99#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
100#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
101#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
102#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
103#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
104#define MVPP2_RXQ_NUM_NEW_OFFSET 16
105#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
106#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
107#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
108#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
109#define MVPP2_RXQ_THRESH_REG 0x204c
110#define MVPP2_OCCUPIED_THRESH_OFFSET 0
111#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
112#define MVPP2_RXQ_INDEX_REG 0x2050
113#define MVPP2_TXQ_NUM_REG 0x2080
114#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
115#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
116#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
117#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
118#define MVPP2_TXQ_THRESH_REG 0x2094
119#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
120#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
121#define MVPP2_TXQ_INDEX_REG 0x2098
122#define MVPP2_TXQ_PREF_BUF_REG 0x209c
123#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
124#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
125#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
126#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
127#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
128#define MVPP2_TXQ_PENDING_REG 0x20a0
129#define MVPP2_TXQ_PENDING_MASK 0x3fff
130#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
131#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
132#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
133#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
134#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
135#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
136#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
137#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
138#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
139#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
140#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
141#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
142#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
143#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
144#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
145#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
148#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
149#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
150#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
151#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* Interrupt Cause and Mask registers */
154#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
155#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
156#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
157#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
158#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
159#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
160#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
161#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
162#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
163#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
164#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
165#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
166#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
167#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
168#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
169#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
170#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
171#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
172#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
173#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
176#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
177#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
178#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
179#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
180#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
181#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
182#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
183#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
184#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
185#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
186#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
187#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
188#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
189#define MVPP2_BM_START_MASK BIT(0)
190#define MVPP2_BM_STOP_MASK BIT(1)
191#define MVPP2_BM_STATE_MASK BIT(4)
192#define MVPP2_BM_LOW_THRESH_OFFS 8
193#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
194#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
195 MVPP2_BM_LOW_THRESH_OFFS)
196#define MVPP2_BM_HIGH_THRESH_OFFS 16
197#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
198#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
199 MVPP2_BM_HIGH_THRESH_OFFS)
200#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
201#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
202#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
203#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
204#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
205#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
206#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
207#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
208#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
209#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
210#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
211#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
212#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
213#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
214#define MVPP2_BM_VIRT_RLS_REG 0x64c0
215#define MVPP2_BM_MC_RLS_REG 0x64c4
216#define MVPP2_BM_MC_ID_MASK 0xfff
217#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)

/* TX Scheduler registers */
220#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
221#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
222#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
223#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
224#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
225#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
226#define MVPP2_TXP_SCHED_MTU_REG 0x801c
227#define MVPP2_TXP_MTU_MAX 0x7FFFF
228#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
229#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
230#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
231#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
232#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
233#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
234#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
235#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
236#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
237#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
238#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
239#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
240#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
241#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
244#define MVPP2_TX_SNOOP_REG 0x8800
245#define MVPP2_TX_PORT_FLUSH_REG 0x8810
246#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
249#define MVPP2_SRC_ADDR_MIDDLE 0x24
250#define MVPP2_SRC_ADDR_HIGH 0x28
251#define MVPP2_PHY_AN_CFG0_REG 0x34
252#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
253#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
254 0x400 + (port) * 0x400)
255#define MVPP2_MIB_LATE_COLLISION 0x7c
256#define MVPP2_ISR_SUM_MASK_REG 0x220c
257#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
258#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
261#define MVPP2_GMAC_CTRL_0_REG 0x0
262#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
263#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
264#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
265#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
266#define MVPP2_GMAC_CTRL_1_REG 0x4
267#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
268#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
269#define MVPP2_GMAC_PCS_LB_EN_BIT 6
270#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
271#define MVPP2_GMAC_SA_LOW_OFFS 7
272#define MVPP2_GMAC_CTRL_2_REG 0x8
273#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
274#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
275#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
276#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
277#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
278#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
279#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
280#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
281#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
282#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
283#define MVPP2_GMAC_FC_ADV_EN BIT(9)
284#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
285#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
286#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
287#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
288#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
289#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
290 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
291
292#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
295#define MVPP2_QUEUE_NEXT_DESC(q, index) \
296 (((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
301#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
302#define MVPP2_RX_COAL_PKTS 32
303#define MVPP2_RX_COAL_USEC 100


/* The two-byte Marvell header: either a special value used by Marvell
 * when the Distributed Switch Architecture is not in use, or the DSA
 * or EDSA tag. MVPP2_MH_SIZE below is its size.
 */
313#define MVPP2_MH_SIZE 2
314#define MVPP2_ETH_TYPE_LEN 2
315#define MVPP2_PPPOE_HDR_SIZE 8
316#define MVPP2_VLAN_TAG_LEN 4

/* Lbtd 802.3 type */
319#define MVPP2_IP_LBDT_TYPE 0xfffa
320
321#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
322#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
325#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
326#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
327
328#define MVPP2_TX_MTU_MAX 0x7ffff
329
330
331#define MVPP2_MAX_TCONT 16
332
333
334#define MVPP2_MAX_PORTS 4
335
336
337#define MVPP2_MAX_TXQ 8
338
339
340#define MVPP2_MAX_RXQ 8
341
342
343#define MVPP2_DEFAULT_RXQ 4
344
345
346#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
347
348
349#define MVPP2_MAX_RXD 128
350
351
352#define MVPP2_MAX_TXD 1024
353
354
355#define MVPP2_CPU_DESC_CHUNK 64
356
357
358#define MVPP2_AGGR_TXQ_SIZE 256
359
360
361#define MVPP2_DESC_ALIGNED_SIZE 32
362
363
364#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
367#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
368#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
369#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
370
371
372#define MVPP2_SKB_SHINFO_SIZE \
373 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
374
375#define MVPP2_RX_PKT_SIZE(mtu) \
376 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
377 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
378
379#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
380#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
381#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
382 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
383
384#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* Maximum L3 address size (an IPv6 address is 16 bytes) */
387#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
390#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
393enum mvpp2_tag_type {
394 MVPP2_TAG_TYPE_NONE = 0,
395 MVPP2_TAG_TYPE_MH = 1,
396 MVPP2_TAG_TYPE_DSA = 2,
397 MVPP2_TAG_TYPE_EDSA = 3,
398 MVPP2_TAG_TYPE_VLAN = 4,
399 MVPP2_TAG_TYPE_LAST = 5
400};

/* Parser constants */
403#define MVPP2_PRS_TCAM_SRAM_SIZE 256
404#define MVPP2_PRS_TCAM_WORDS 6
405#define MVPP2_PRS_SRAM_WORDS 4
406#define MVPP2_PRS_FLOW_ID_SIZE 64
407#define MVPP2_PRS_FLOW_ID_MASK 0x3f
408#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
409#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
410#define MVPP2_PRS_IPV4_HEAD 0x40
411#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
412#define MVPP2_PRS_IPV4_MC 0xe0
413#define MVPP2_PRS_IPV4_MC_MASK 0xf0
414#define MVPP2_PRS_IPV4_BC_MASK 0xff
415#define MVPP2_PRS_IPV4_IHL 0x5
416#define MVPP2_PRS_IPV4_IHL_MASK 0xf
417#define MVPP2_PRS_IPV6_MC 0xff
418#define MVPP2_PRS_IPV6_MC_MASK 0xff
419#define MVPP2_PRS_IPV6_HOP_MASK 0xff
420#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
421#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
422#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
431#define MVPP2_PRS_AI_BITS 8
432#define MVPP2_PRS_PORT_MASK 0xff
433#define MVPP2_PRS_LU_MASK 0xf
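/* TCAM header data bytes are interleaved with their enable bytes: each
 * pair of data bytes (offsets 0-1, 2-3, ...) is followed by the pair of
 * enable bytes that masks it, which is what the two macros below encode.
 */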
434#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
435 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
436#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
437 (((offs) * 2) - ((offs) % 2) + 2)
438#define MVPP2_PRS_TCAM_AI_BYTE 16
439#define MVPP2_PRS_TCAM_PORT_BYTE 17
440#define MVPP2_PRS_TCAM_LU_BYTE 20
441#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
442#define MVPP2_PRS_TCAM_INV_WORD 5
443
444#define MVPP2_PE_DROP_ALL 0
445#define MVPP2_PE_FIRST_FREE_TID 1
446#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
447#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
448#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
449#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
450#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
451#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
452#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
453#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
454#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
455#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
456#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
457#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
458#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
459#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
460#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
461#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
462#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
463#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
464#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
465#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
466#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
467#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
468#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
469#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
470#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
475#define MVPP2_PRS_SRAM_RI_OFFS 0
476#define MVPP2_PRS_SRAM_RI_WORD 0
477#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
478#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
479#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
480#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
481#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
482#define MVPP2_PRS_SRAM_UDF_OFFS 73
483#define MVPP2_PRS_SRAM_UDF_BITS 8
484#define MVPP2_PRS_SRAM_UDF_MASK 0xff
485#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
486#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
487#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
488#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
489#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
490#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
491#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
492#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
493#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
494#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
495#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
496#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
497#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
498#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
499#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
500#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
501#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
502#define MVPP2_PRS_SRAM_AI_OFFS 90
503#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
504#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
505#define MVPP2_PRS_SRAM_AI_MASK 0xff
506#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
507#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
508#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
509#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
512#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
513#define MVPP2_PRS_RI_DSA_MASK 0x2
514#define MVPP2_PRS_RI_VLAN_MASK 0xc
515#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
516#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
517#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
518#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
519#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
520#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
521#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
522#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
523#define MVPP2_PRS_RI_L2_MCAST BIT(9)
524#define MVPP2_PRS_RI_L2_BCAST BIT(10)
525#define MVPP2_PRS_RI_PPPOE_MASK 0x800
526#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
527#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
528#define MVPP2_PRS_RI_L3_IP4 BIT(12)
529#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
530#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
531#define MVPP2_PRS_RI_L3_IP6 BIT(14)
532#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
533#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
534#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
535#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
536#define MVPP2_PRS_RI_L3_MCAST BIT(15)
537#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
538#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
539#define MVPP2_PRS_RI_UDF3_MASK 0x300000
540#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
541#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
542#define MVPP2_PRS_RI_L4_TCP BIT(22)
543#define MVPP2_PRS_RI_L4_UDP BIT(23)
544#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
545#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
546#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
547#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
550#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
551#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
552#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
553#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
554#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
555#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
556#define MVPP2_PRS_SINGLE_VLAN_AI 0
557#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
560#define MVPP2_PRS_TAGGED true
561#define MVPP2_PRS_UNTAGGED false
562#define MVPP2_PRS_EDSA true
563#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
566enum mvpp2_prs_udf {
567 MVPP2_PRS_UDF_MAC_DEF,
568 MVPP2_PRS_UDF_MAC_RANGE,
569 MVPP2_PRS_UDF_L2_DEF,
570 MVPP2_PRS_UDF_L2_DEF_COPY,
571 MVPP2_PRS_UDF_L2_USER,
572};

/* Lookup ID */
575enum mvpp2_prs_lookup {
576 MVPP2_PRS_LU_MH,
577 MVPP2_PRS_LU_MAC,
578 MVPP2_PRS_LU_DSA,
579 MVPP2_PRS_LU_VLAN,
580 MVPP2_PRS_LU_L2,
581 MVPP2_PRS_LU_PPPOE,
582 MVPP2_PRS_LU_IP4,
583 MVPP2_PRS_LU_IP6,
584 MVPP2_PRS_LU_FLOWS,
585 MVPP2_PRS_LU_LAST,
586};

/* L3 cast enum */
589enum mvpp2_prs_l3_cast {
590 MVPP2_PRS_L3_UNI_CAST,
591 MVPP2_PRS_L3_MULTI_CAST,
592 MVPP2_PRS_L3_BROAD_CAST
593};

/* Classifier constants */
596#define MVPP2_CLS_FLOWS_TBL_SIZE 512
597#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
598#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
601#define MVPP2_BM_POOLS_NUM 8
602#define MVPP2_BM_LONG_BUF_NUM 1024
603#define MVPP2_BM_SHORT_BUF_NUM 2048
604#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
605#define MVPP2_BM_POOL_PTR_ALIGN 128
606#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
607#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
610#define MVPP2_BM_COOKIE_POOL_OFFS 8
611#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size: chosen so that for SWF the total
 * number of bytes allocated for each buffer is 512.
 */
617#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
618
619enum mvpp2_bm_type {
620 MVPP2_BM_FREE,
621 MVPP2_BM_SWF_LONG,
622 MVPP2_BM_SWF_SHORT
623};

/* Definitions */

/* Shared Packet Processor resources */
628struct mvpp2 {
629
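	/* Shared registers' base addresses */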
630 void __iomem *base;
631 void __iomem *lms_base;
632
633
634 struct clk *pp_clk;
635 struct clk *gop_clk;
636
637
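	/* List of pointers to port structures */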
638 struct mvpp2_port **port_list;
639
640
641 struct mvpp2_tx_queue *aggr_txqs;
642
643
644 struct mvpp2_bm_pool *bm_pools;
645
646
647 struct mvpp2_prs_shadow *prs_shadow;
648
649 bool *prs_double_vlans;
650
651
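	/* Tclk value */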
652 u32 tclk;
653};
654
655struct mvpp2_pcpu_stats {
656 struct u64_stats_sync syncp;
657 u64 rx_packets;
658 u64 rx_bytes;
659 u64 tx_packets;
660 u64 tx_bytes;
661};
662
663struct mvpp2_port {
664 u8 id;
665
666 int irq;
667
668 struct mvpp2 *priv;
669
670
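	/* Per-port registers' base address */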
671 void __iomem *base;
672
673 struct mvpp2_rx_queue **rxqs;
674 struct mvpp2_tx_queue **txqs;
675 struct net_device *dev;
676
677 int pkt_size;
678
679 u32 pending_cause_rx;
680 struct napi_struct napi;
681
682
683 unsigned long flags;
684
685 u16 tx_ring_size;
686 u16 rx_ring_size;
687 struct mvpp2_pcpu_stats __percpu *stats;
688
689 struct phy_device *phy_dev;
690 phy_interface_t phy_interface;
691 struct device_node *phy_node;
692 unsigned int link;
693 unsigned int duplex;
694 unsigned int speed;
695
696 struct mvpp2_bm_pool *pool_long;
697 struct mvpp2_bm_pool *pool_short;
698
699
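	/* Index of first port's physical RXQ */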
700 u8 first_rxq;
701};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the layout
 * of the transmit and receive DMA descriptors; that layout is
 * therefore defined by the hardware design.
 */
708#define MVPP2_TXD_L3_OFF_SHIFT 0
709#define MVPP2_TXD_IP_HLEN_SHIFT 8
710#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
711#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
712#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
713#define MVPP2_TXD_PADDING_DISABLE BIT(23)
714#define MVPP2_TXD_L4_UDP BIT(24)
715#define MVPP2_TXD_L3_IP6 BIT(26)
716#define MVPP2_TXD_L_DESC BIT(28)
717#define MVPP2_TXD_F_DESC BIT(29)
718
719#define MVPP2_RXD_ERR_SUMMARY BIT(15)
720#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
721#define MVPP2_RXD_ERR_CRC 0x0
722#define MVPP2_RXD_ERR_OVERRUN BIT(13)
723#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
724#define MVPP2_RXD_BM_POOL_ID_OFFS 16
725#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
726#define MVPP2_RXD_HWF_SYNC BIT(21)
727#define MVPP2_RXD_L4_CSUM_OK BIT(22)
728#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
729#define MVPP2_RXD_L4_TCP BIT(25)
730#define MVPP2_RXD_L4_UDP BIT(26)
731#define MVPP2_RXD_L3_IP4 BIT(28)
732#define MVPP2_RXD_L3_IP6 BIT(30)
733#define MVPP2_RXD_BUF_HDR BIT(31)
734
735struct mvpp2_tx_desc {
736 u32 command;
737 u8 packet_offset;
738 u8 phys_txq;
739 u16 data_size;
740 u32 buf_phys_addr;
741 u32 buf_cookie;
742 u32 reserved1[3];
743 u32 reserved2;
744};
745
746struct mvpp2_rx_desc {
747 u32 status;
748 u16 reserved1;
749 u16 data_size;
750 u32 buf_phys_addr;
751 u32 buf_cookie;
752 u16 reserved2;
753 u16 reserved3;
754 u8 reserved4;
755 u8 reserved5;
756 u16 reserved6;
757 u32 reserved7;
758 u32 reserved8;
759};
760
761
762struct mvpp2_txq_pcpu {
763 int cpu;
764
765
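	/* Number of Tx DMA descriptors in the descriptor ring */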
766 int size;
767
768
769
770
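	/* Number of currently used Tx DMA descriptors in the ring */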
771 int count;
772
773
774 int reserved_num;
775
776
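	/* Array of transmitted skb */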
777 struct sk_buff **tx_skb;
778
779
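	/* Index of the next Tx descriptor slot to be filled */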
780 int txq_put_index;
781
782
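	/* Index of the next Tx descriptor to be cleaned up (released) */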
783 int txq_get_index;
784};
785
786struct mvpp2_tx_queue {
787
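	/* Physical number of this Tx queue */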
788 u8 id;
789
790
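	/* Logical number of this Tx queue */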
791 u8 log_id;
792
793
794 int size;
795
796
797 int count;
798
799
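	/* Per-CPU control of physical Tx queues */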
800 struct mvpp2_txq_pcpu __percpu *pcpu;
801
802
803 struct sk_buff **tx_skb;
804
805 u32 done_pkts_coal;
806
807
808 struct mvpp2_tx_desc *descs;
809
810
811 dma_addr_t descs_phys;
812
813
814 int last_desc;
815
816
817 int next_desc_to_proc;
818};
819
820struct mvpp2_rx_queue {
821
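	/* RX queue number, in the range 0-31 for physical RXQs */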
822 u8 id;
823
824
825 int size;
826
827 u32 pkts_coal;
828 u32 time_coal;
829
830
831 struct mvpp2_rx_desc *descs;
832
833
834 dma_addr_t descs_phys;
835
836
837 int last_desc;
838
839
840 int next_desc_to_proc;
841
842
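	/* ID of port to which physical RXQ is mapped */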
843 int port;
844
845
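	/* Port's rxq number in the range 0 - (rxqs_num - 1) */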
846 int logic_rxq;
847};
848
849union mvpp2_prs_tcam_entry {
850 u32 word[MVPP2_PRS_TCAM_WORDS];
851 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
852};
853
854union mvpp2_prs_sram_entry {
855 u32 word[MVPP2_PRS_SRAM_WORDS];
856 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
857};
858
859struct mvpp2_prs_entry {
860 u32 index;
861 union mvpp2_prs_tcam_entry tcam;
862 union mvpp2_prs_sram_entry sram;
863};
864
865struct mvpp2_prs_shadow {
866 bool valid;
867 bool finish;
868
869
870 int lu;
871
872
873 int udf;
874
875
876 u32 ri;
877 u32 ri_mask;
878};
879
880struct mvpp2_cls_flow_entry {
881 u32 index;
882 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
883};
884
885struct mvpp2_cls_lookup_entry {
886 u32 lkpid;
887 u32 way;
888 u32 data;
889};
890
891struct mvpp2_bm_pool {
892
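	/* Pool number in the range 0-7 */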
893 int id;
894 enum mvpp2_bm_type type;
895
896
897 int size;
898
899 int buf_num;
900
901 int buf_size;
902
903 int pkt_size;
904
905
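	/* BPPE virtual base address */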
906 u32 *virt_addr;
907
908 dma_addr_t phys_addr;
909
910
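	/* Ports using BM pool */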
911 u32 port_map;
912
913
914 atomic_t in_use;
915 int in_use_thresh;
916
917 spinlock_t lock;
918};
919
920struct mvpp2_buff_hdr {
921 u32 next_buff_phys_addr;
922 u32 next_buff_virt_addr;
923 u16 byte_count;
924 u16 info;
925 u8 reserved1;
926};
927
928
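/* Buffer header info bits */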
929#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
930#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
931#define MVPP2_B_HDR_INFO_LAST_OFFS 12
932#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
933#define MVPP2_B_HDR_INFO_IS_LAST(info) \
934 ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)

/* Static declarations */

/* Number of RXQs used by single port */
939static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
941static int txq_number = MVPP2_MAX_TXQ;
942
943#define MVPP2_DRIVER_NAME "mvpp2"
944#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

948static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
949{
950 writel(data, priv->base + offset);
951}
952
953static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
954{
955 return readl(priv->base + offset);
956}
957
958static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
959{
960 txq_pcpu->txq_get_index++;
961 if (txq_pcpu->txq_get_index == txq_pcpu->size)
962 txq_pcpu->txq_get_index = 0;
963}
964
965static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
966 struct sk_buff *skb)
967{
968 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
969 txq_pcpu->txq_put_index++;
970 if (txq_pcpu->txq_put_index == txq_pcpu->size)
971 txq_pcpu->txq_put_index = 0;
972}

/* Get number of physical egress port */
975static inline int mvpp2_egress_port(struct mvpp2_port *port)
976{
977 return MVPP2_MAX_TCONT + port->id;
978}

/* Get number of physical TxQ */
981static inline int mvpp2_txq_phys(int port, int txq)
982{
983 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
984}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
989static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
990{
991 int i;
992
993 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
994 return -EINVAL;
995
996
997 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
998
999
1000 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1001 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1002 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1003
1004
1005 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1006 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1007 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1008
1009 return 0;
1010}

/* Read tcam entry from hw */
1013static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1014{
1015 int i;
1016
1017 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1018 return -EINVAL;
1019
1020
1021 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1022
1023 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1024 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1025 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1026 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1027
1028 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1029 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1030
1031
1032 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1033 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1034 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1035
1036 return 0;
1037}

/* Invalidate tcam hw entry */
1040static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1041{
1042
1043 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1044 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1045 MVPP2_PRS_TCAM_INV_MASK);
1046}

/* Enable shadow table entry and set its lookup ID */
1049static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1050{
1051 priv->prs_shadow[index].valid = true;
1052 priv->prs_shadow[index].lu = lu;
1053}

/* Update ri fields in shadow table entry */
1056static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1057 unsigned int ri, unsigned int ri_mask)
1058{
1059 priv->prs_shadow[index].ri_mask = ri_mask;
1060 priv->prs_shadow[index].ri = ri;
1061}

/* Update lookup field in tcam sw entry */
1064static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1065{
1066 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1067
1068 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1069 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1070}

/* Update mask for single port in tcam sw entry */
1073static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1074 unsigned int port, bool add)
1075{
1076 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1077
1078 if (add)
1079 pe->tcam.byte[enable_off] &= ~(1 << port);
1080 else
1081 pe->tcam.byte[enable_off] |= 1 << port;
1082}

/* Update port map field in tcam sw entry */
1085static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1086 unsigned int ports)
1087{
1088 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1089 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1090
1091 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1092 pe->tcam.byte[enable_off] &= ~port_mask;
1093 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1094}

/* Obtain port map from tcam sw entry */
1097static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1098{
1099 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1100
1101 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1102}

/* Set byte of data and its enable bits in tcam sw entry */
1105static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1106 unsigned int offs, unsigned char byte,
1107 unsigned char enable)
1108{
1109 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1110 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1111}

/* Get byte of data and its enable bits from tcam sw entry */
1114static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1115 unsigned int offs, unsigned char *byte,
1116 unsigned char *enable)
1117{
1118 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1119 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1120}

/* Compare tcam data bytes with a pattern */
1123static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1124 u16 data)
1125{
1126 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1127 u16 tcam_data;
1128
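	/* The 16-bit value to match is stored as two consecutive tcam data bytes */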
	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1130 if (tcam_data != data)
1131 return false;
1132 return true;
1133}

/* Update ai bits in tcam sw entry */
1136static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1137 unsigned int bits, unsigned int enable)
1138{
1139 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1140
1141 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1142
1143 if (!(enable & BIT(i)))
1144 continue;
1145
1146 if (bits & BIT(i))
1147 pe->tcam.byte[ai_idx] |= 1 << i;
1148 else
1149 pe->tcam.byte[ai_idx] &= ~(1 << i);
1150 }
1151
1152 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1153}

/* Get ai bits from tcam sw entry */
1156static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1157{
1158 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1159}

/* Set ethertype in tcam sw entry */
1162static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1163 unsigned short ethertype)
1164{
1165 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1166 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1167}

/* Set bits in sram sw entry */
1170static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1171 int val)
1172{
1173 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1174}

/* Clear bits in sram sw entry */
1177static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1178 int val)
1179{
1180 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1181}

/* Update ri bits in sram sw entry */
1184static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1185 unsigned int bits, unsigned int mask)
1186{
1187 unsigned int i;
1188
1189 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1190 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1191
1192 if (!(mask & BIT(i)))
1193 continue;
1194
1195 if (bits & BIT(i))
1196 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1197 else
1198 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1199
1200 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1201 }
1202}

/* Obtain ri bits from sram sw entry */
1205static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1206{
1207 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1208}

/* Update ai bits in sram sw entry */
1211static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1212 unsigned int bits, unsigned int mask)
1213{
1214 unsigned int i;
1215 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1216
1217 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1218
1219 if (!(mask & BIT(i)))
1220 continue;
1221
1222 if (bits & BIT(i))
1223 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1224 else
1225 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1226
1227 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1228 }
1229}

/* Read ai bits from sram sw entry */
1232static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1233{
1234 u8 bits;
1235 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1236 int ai_en_off = ai_off + 1;
1237 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1238
1239 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1240 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1241
1242 return bits;
1243}

/* In sram sw entry set lookup ID field of the tcam key to be used in
 * the next lookup iteration
 */
1248static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1249 unsigned int lu)
1250{
1251 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1252
1253 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1254 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1255 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1256}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
1261static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1262 unsigned int op)
1263{
1264
1265 if (shift < 0) {
1266 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1267 shift = 0 - shift;
1268 } else {
1269 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1270 }
1271
1272
1273 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1274 (unsigned char)shift;
1275
1276
1277 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1278 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1279 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1280
1281
1282 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1283}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
1288static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1289 unsigned int type, int offset,
1290 unsigned int op)
1291{
1292
1293 if (offset < 0) {
1294 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1295 offset = 0 - offset;
1296 } else {
1297 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1298 }
1299
1300
1301 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1302 MVPP2_PRS_SRAM_UDF_MASK);
1303 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1304 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1305 MVPP2_PRS_SRAM_UDF_BITS)] &=
1306 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1307 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1308 MVPP2_PRS_SRAM_UDF_BITS)] |=
1309 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1310
1311
1312 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1313 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1314 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1315
1316
1317 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1318 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1319 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1320
1321 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1322 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1323 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1324 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1325
1326 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1327 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1328 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1329
1330
1331 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1332}

/* Find parser flow entry */
1335static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1336{
1337 struct mvpp2_prs_entry *pe;
1338 int tid;
1339
1340 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1341 if (!pe)
1342 return NULL;
1343 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1344
1345
1346 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1347 u8 bits;
1348
1349 if (!priv->prs_shadow[tid].valid ||
1350 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1351 continue;
1352
1353 pe->index = tid;
1354 mvpp2_prs_hw_read(priv, pe);
1355 bits = mvpp2_prs_sram_ai_get(pe);
1356
1357
1358 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1359 return pe;
1360 }
1361 kfree(pe);
1362
1363 return NULL;
1364}

/* Return first free tcam index, seeking from start to end */
1367static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1368 unsigned char end)
1369{
1370 int tid;
1371
1372 if (start > end)
1373 swap(start, end);
1374
1375 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1376 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1377
1378 for (tid = start; tid <= end; tid++) {
1379 if (!priv->prs_shadow[tid].valid)
1380 return tid;
1381 }
1382
1383 return -EINVAL;
1384}

/* Enable/disable dropping all mac da's */
1387static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1388{
1389 struct mvpp2_prs_entry pe;
1390
1391 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1392
1393 pe.index = MVPP2_PE_DROP_ALL;
1394 mvpp2_prs_hw_read(priv, &pe);
1395 } else {
1396
1397 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1398 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1399 pe.index = MVPP2_PE_DROP_ALL;
1400
1401
1402 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1403 MVPP2_PRS_RI_DROP_MASK);
1404
1405 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1406 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1407
1408
1409 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1410
1411
1412 mvpp2_prs_tcam_port_map_set(&pe, 0);
1413 }
1414
1415
1416 mvpp2_prs_tcam_port_set(&pe, port, add);
1417
1418 mvpp2_prs_hw_write(priv, &pe);
1419}

/* Set port to promiscuous mode */
1422static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1423{
1424 struct mvpp2_prs_entry pe;
1425
1426
1427
1428 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1429
1430 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1431 mvpp2_prs_hw_read(priv, &pe);
1432 } else {
1433
1434 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1435 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1436 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1437
1438
1439 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1440
1441
1442 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1443 MVPP2_PRS_RI_L2_CAST_MASK);
1444
1445
1446 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1447 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1448
1449
1450 mvpp2_prs_tcam_port_map_set(&pe, 0);
1451
1452
1453 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1454 }
1455
1456
1457 mvpp2_prs_tcam_port_set(&pe, port, add);
1458
1459 mvpp2_prs_hw_write(priv, &pe);
1460}

/* Accept multicast */
1463static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1464 bool add)
1465{
1466 struct mvpp2_prs_entry pe;
1467 unsigned char da_mc;
1468
1469
1470
1471
1472 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1473
1474 if (priv->prs_shadow[index].valid) {
1475
1476 pe.index = index;
1477 mvpp2_prs_hw_read(priv, &pe);
1478 } else {
1479
1480 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1481 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1482 pe.index = index;
1483
1484
1485 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1486
1487
1488 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1489 MVPP2_PRS_RI_L2_CAST_MASK);
1490
1491
1492 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1493
1494
1495 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1496 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1497
1498
1499 mvpp2_prs_tcam_port_map_set(&pe, 0);
1500
1501
1502 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1503 }
1504
1505
1506 mvpp2_prs_tcam_port_set(&pe, port, add);
1507
1508 mvpp2_prs_hw_write(priv, &pe);
1509}

/* Set entry for dsa packets */
1512static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1513 bool tagged, bool extend)
1514{
1515 struct mvpp2_prs_entry pe;
1516 int tid, shift;
1517
1518 if (extend) {
1519 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1520 shift = 8;
1521 } else {
1522 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1523 shift = 4;
1524 }
1525
1526 if (priv->prs_shadow[tid].valid) {
1527
1528 pe.index = tid;
1529 mvpp2_prs_hw_read(priv, &pe);
1530 } else {
1531
1532 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1533 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1534 pe.index = tid;
1535
1536
1537 mvpp2_prs_sram_shift_set(&pe, shift,
1538 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1539
1540
1541 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1542
1543 if (tagged) {
1544
1545 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1546 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1547 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1548
1549 mvpp2_prs_sram_ai_update(&pe, 0,
1550 MVPP2_PRS_SRAM_AI_MASK);
1551
1552 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1553 } else {
1554
1555 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1556 MVPP2_PRS_RI_VLAN_MASK);
1557 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1558 }
1559
1560
1561 mvpp2_prs_tcam_port_map_set(&pe, 0);
1562 }
1563
1564
1565 mvpp2_prs_tcam_port_set(&pe, port, add);
1566
1567 mvpp2_prs_hw_write(priv, &pe);
1568}

/* Set entry for dsa ethertype */
1571static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1572 bool add, bool tagged, bool extend)
1573{
1574 struct mvpp2_prs_entry pe;
1575 int tid, shift, port_mask;
1576
1577 if (extend) {
1578 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1579 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1580 port_mask = 0;
1581 shift = 8;
1582 } else {
1583 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1584 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1585 port_mask = MVPP2_PRS_PORT_MASK;
1586 shift = 4;
1587 }
1588
1589 if (priv->prs_shadow[tid].valid) {
1590
1591 pe.index = tid;
1592 mvpp2_prs_hw_read(priv, &pe);
1593 } else {
1594
1595 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1596 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1597 pe.index = tid;
1598
1599
1600 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1601 mvpp2_prs_match_etype(&pe, 2, 0);
1602
1603 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1604 MVPP2_PRS_RI_DSA_MASK);
1605
1606 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1607 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1608
1609
1610 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1611
1612 if (tagged) {
1613
1614 mvpp2_prs_tcam_data_byte_set(&pe,
1615 MVPP2_ETH_TYPE_LEN + 2 + 3,
1616 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1617 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1618
1619 mvpp2_prs_sram_ai_update(&pe, 0,
1620 MVPP2_PRS_SRAM_AI_MASK);
1621
1622 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1623 } else {
1624
1625 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1626 MVPP2_PRS_RI_VLAN_MASK);
1627 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1628 }
1629
1630 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1631 }
1632
1633
1634 mvpp2_prs_tcam_port_set(&pe, port, add);
1635
1636 mvpp2_prs_hw_write(priv, &pe);
1637}

/* Search for existing single/triple vlan entry */
1640static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1641 unsigned short tpid, int ai)
1642{
1643 struct mvpp2_prs_entry *pe;
1644 int tid;
1645
1646 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1647 if (!pe)
1648 return NULL;
1649 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1650
1651
1652 for (tid = MVPP2_PE_FIRST_FREE_TID;
1653 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1654 unsigned int ri_bits, ai_bits;
1655 bool match;
1656
1657 if (!priv->prs_shadow[tid].valid ||
1658 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1659 continue;
1660
1661 pe->index = tid;
1662
1663 mvpp2_prs_hw_read(priv, pe);
1664 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1665 if (!match)
1666 continue;
1667
1668
1669 ri_bits = mvpp2_prs_sram_ri_get(pe);
1670 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1671
1672
1673 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1674
1675 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1676
1677 if (ai != ai_bits)
1678 continue;
1679
1680 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1681 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1682 return pe;
1683 }
1684 kfree(pe);
1685
1686 return NULL;
1687}

/* Add/update single/triple vlan entry */
1690static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1691 unsigned int port_map)
1692{
1693 struct mvpp2_prs_entry *pe;
1694 int tid_aux, tid;
1695 int ret = 0;
1696
1697 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1698
1699 if (!pe) {
1700
1701 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1702 MVPP2_PE_FIRST_FREE_TID);
1703 if (tid < 0)
1704 return tid;
1705
1706 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1707 if (!pe)
1708 return -ENOMEM;
1709
1710
1711 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1712 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1713 unsigned int ri_bits;
1714
1715 if (!priv->prs_shadow[tid_aux].valid ||
1716 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1717 continue;
1718
1719 pe->index = tid_aux;
1720 mvpp2_prs_hw_read(priv, pe);
1721 ri_bits = mvpp2_prs_sram_ri_get(pe);
1722 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1723 MVPP2_PRS_RI_VLAN_DOUBLE)
1724 break;
1725 }
1726
1727 if (tid <= tid_aux) {
1728 ret = -EINVAL;
1729 goto error;
1730 }
1731
1732 memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
1733 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1734 pe->index = tid;
1735
1736 mvpp2_prs_match_etype(pe, 0, tpid);
1737
1738 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1739
1740 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1741 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1742
1743 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1744
1745 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1746 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1747 MVPP2_PRS_RI_VLAN_MASK);
1748 } else {
1749 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1750 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1751 MVPP2_PRS_RI_VLAN_MASK);
1752 }
1753 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1754
1755 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1756 }
1757
1758 mvpp2_prs_tcam_port_map_set(pe, port_map);
1759
1760 mvpp2_prs_hw_write(priv, pe);
1761
1762error:
1763 kfree(pe);
1764
1765 return ret;
1766}

/* Get first free double vlan ai number */
1769static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1770{
1771 int i;
1772
1773 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1774 if (!priv->prs_double_vlans[i])
1775 return i;
1776 }
1777
1778 return -EINVAL;
1779}

/* Search for existing double vlan entry */
1782static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1783 unsigned short tpid1,
1784 unsigned short tpid2)
1785{
1786 struct mvpp2_prs_entry *pe;
1787 int tid;
1788
1789 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1790 if (!pe)
1791 return NULL;
1792 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1793
1794
1795 for (tid = MVPP2_PE_FIRST_FREE_TID;
1796 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1797 unsigned int ri_mask;
1798 bool match;
1799
1800 if (!priv->prs_shadow[tid].valid ||
1801 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1802 continue;
1803
1804 pe->index = tid;
1805 mvpp2_prs_hw_read(priv, pe);
1806
1807 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
1808 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1809
1810 if (!match)
1811 continue;
1812
1813 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1814 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1815 return pe;
1816 }
1817 kfree(pe);
1818
1819 return NULL;
1820}

/* Add or update double vlan entry */
1823static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1824 unsigned short tpid2,
1825 unsigned int port_map)
1826{
1827 struct mvpp2_prs_entry *pe;
1828 int tid_aux, tid, ai, ret = 0;
1829
1830 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1831
1832 if (!pe) {
1833
1834 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1835 MVPP2_PE_LAST_FREE_TID);
1836 if (tid < 0)
1837 return tid;
1838
1839 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1840 if (!pe)
1841 return -ENOMEM;
1842
1843
1844 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1845 if (ai < 0) {
1846 ret = ai;
1847 goto error;
1848 }
1849
1850
1851 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1852 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1853 unsigned int ri_bits;
1854
1855 if (!priv->prs_shadow[tid_aux].valid ||
1856 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1857 continue;
1858
1859 pe->index = tid_aux;
1860 mvpp2_prs_hw_read(priv, pe);
1861 ri_bits = mvpp2_prs_sram_ri_get(pe);
1862 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1863 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1864 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1865 break;
1866 }
1867
1868 if (tid >= tid_aux) {
1869 ret = -ERANGE;
1870 goto error;
1871 }
1872
1873 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1874 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1875 pe->index = tid;
1876
1877 priv->prs_double_vlans[ai] = true;
1878
1879 mvpp2_prs_match_etype(pe, 0, tpid1);
1880 mvpp2_prs_match_etype(pe, 4, tpid2);
1881
1882 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1883
1884 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1885 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1886 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1887 MVPP2_PRS_RI_VLAN_MASK);
1888 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1889 MVPP2_PRS_SRAM_AI_MASK);
1890
1891 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1892 }
1893
1894
1895 mvpp2_prs_tcam_port_map_set(pe, port_map);
1896 mvpp2_prs_hw_write(priv, pe);
1897
1898error:
1899 kfree(pe);
1900 return ret;
1901}

/* IPv4 header parsing for fragmentation and L4 offset */
1904static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1905 unsigned int ri, unsigned int ri_mask)
1906{
1907 struct mvpp2_prs_entry pe;
1908 int tid;
1909
1910 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1911 (proto != IPPROTO_IGMP))
1912 return -EINVAL;
1913
1914
1915 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1916 MVPP2_PE_LAST_FREE_TID);
1917 if (tid < 0)
1918 return tid;
1919
1920 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1921 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1922 pe.index = tid;
1923
1924
1925 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1926 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1927
1928 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1929 sizeof(struct iphdr) - 4,
1930 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1931 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1932 MVPP2_PRS_IPV4_DIP_AI_BIT);
1933 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1934 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1935
1936 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1937 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1938
1939 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1940
1941
1942 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1943 mvpp2_prs_hw_write(priv, &pe);
1944
1945
1946 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1947 MVPP2_PE_LAST_FREE_TID);
1948 if (tid < 0)
1949 return tid;
1950
1951 pe.index = tid;
1952
1953 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1954 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1955 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1956
1957 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1958 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1959
1960
1961 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1962 mvpp2_prs_hw_write(priv, &pe);
1963
1964 return 0;
1965}

/* IPv4 L3 multicast or broadcast */
1968static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1969{
1970 struct mvpp2_prs_entry pe;
1971 int mask, tid;
1972
1973 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1974 MVPP2_PE_LAST_FREE_TID);
1975 if (tid < 0)
1976 return tid;
1977
1978 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1979 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1980 pe.index = tid;
1981
1982 switch (l3_cast) {
1983 case MVPP2_PRS_L3_MULTI_CAST:
1984 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
1985 MVPP2_PRS_IPV4_MC_MASK);
1986 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1987 MVPP2_PRS_RI_L3_ADDR_MASK);
1988 break;
1989 case MVPP2_PRS_L3_BROAD_CAST:
1990 mask = MVPP2_PRS_IPV4_BC_MASK;
1991 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
1992 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
1993 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
1994 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
1995 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
1996 MVPP2_PRS_RI_L3_ADDR_MASK);
1997 break;
1998 default:
1999 return -EINVAL;
2000 }
2001
2002
2003 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2004 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2005
2006 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2007 MVPP2_PRS_IPV4_DIP_AI_BIT);
2008
2009 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2010
2011
2012 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2013 mvpp2_prs_hw_write(priv, &pe);
2014
2015 return 0;
2016}

/* Set entries for protocols over IPv6 */
2019static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2020 unsigned int ri, unsigned int ri_mask)
2021{
2022 struct mvpp2_prs_entry pe;
2023 int tid;
2024
2025 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2026 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2027 return -EINVAL;
2028
2029 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2030 MVPP2_PE_LAST_FREE_TID);
2031 if (tid < 0)
2032 return tid;
2033
2034 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2035 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2036 pe.index = tid;
2037
2038
2039 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2040 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2041 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2042 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2043 sizeof(struct ipv6hdr) - 6,
2044 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2045
2046 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2047 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2048 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2049
2050 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2051
2052
2053 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2054 mvpp2_prs_hw_write(priv, &pe);
2055
2056 return 0;
2057}
2058
2059
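/* IPv6 L3 multicast entry */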
2060static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2061{
2062 struct mvpp2_prs_entry pe;
2063 int tid;
2064
2065 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2066 return -EINVAL;
2067
2068 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2069 MVPP2_PE_LAST_FREE_TID);
2070 if (tid < 0)
2071 return tid;
2072
2073 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2074 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2075 pe.index = tid;
2076
2077
2078 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2079 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2080 MVPP2_PRS_RI_L3_ADDR_MASK);
2081 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2082 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2083
2084 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2085
2086 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2087 MVPP2_PRS_IPV6_MC_MASK);
2088 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2089
2090 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2091
2092
2093 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2094 mvpp2_prs_hw_write(priv, &pe);
2095
2096 return 0;
2097}
2098
2099
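/* Parser per-port initialization: set the first lookup ID, the maximum
 * number of lookup loops and the initial packet offset
 */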
2100static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2101 int lu_max, int offset)
2102{
2103 u32 val;
2104
2105
2106 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2107 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2108 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2109 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2110
2111
2112 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2113 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2114 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2115 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2116
2117
2118
2119
2120 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2121 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2122 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2123 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2124}
2125
2126
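/* Initialize default flow entries, one per port */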
2127static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2128{
2129 struct mvpp2_prs_entry pe;
2130 int port;
2131
2132 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2133 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2134 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2135 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2136
2137
2138 mvpp2_prs_tcam_port_map_set(&pe, 0);
2139
2140
2141 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2142 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2143
2144
2145 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2146 mvpp2_prs_hw_write(priv, &pe);
2147 }
2148}
2149
2150
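/* Set default entry for Marvell Header field */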
2151static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2152{
2153 struct mvpp2_prs_entry pe;
2154
2155 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2156
2157 pe.index = MVPP2_PE_MH_DEFAULT;
2158 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2159 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2160 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2161 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2162
2163
2164 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2165
2166
2167 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2168 mvpp2_prs_hw_write(priv, &pe);
2169}
2170
2171
2172
2173
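/* Set default entries (place holders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */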
2174static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2175{
2176 struct mvpp2_prs_entry pe;
2177
2178 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2179
2180
2181 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2182 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2183
2184 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2185 MVPP2_PRS_RI_DROP_MASK);
2186 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2187 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2188
2189
2190 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2191
2192
2193 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2194 mvpp2_prs_hw_write(priv, &pe);
2195
2196
2197 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2198 mvpp2_prs_mac_promisc_set(priv, 0, false);
2199 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2200 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2201}
2202
2203
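/* Set default entries for various types of dsa packets */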
2204static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2205{
2206 struct mvpp2_prs_entry pe;
2207
2208
2209 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2210 MVPP2_PRS_EDSA);
2211
2212
2213 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2214
2215
2216 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2217 MVPP2_PRS_DSA);
2218
2219
2220 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2221
2222
2223 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2224 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2225
2226
2227 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2228 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2229
2230
2231 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2232 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2233
2234
2235 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2236 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2237
2238
2239 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2240 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2241 pe.index = MVPP2_PE_DSA_DEFAULT;
2242 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2243
2244
2245 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2246 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2247
2248
2249 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2250
2251
2252 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2253
2254 mvpp2_prs_hw_write(priv, &pe);
2255}
2256
2257
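/* Match basic ethertypes */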
2258static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2259{
2260 struct mvpp2_prs_entry pe;
2261 int tid;
2262
2263
2264 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2265 MVPP2_PE_LAST_FREE_TID);
2266 if (tid < 0)
2267 return tid;
2268
2269 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2270 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2271 pe.index = tid;
2272
2273 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2274
2275 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2276 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2277 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2278 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2279 MVPP2_PRS_RI_PPPOE_MASK);
2280
2281
2282 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2283 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2284 priv->prs_shadow[pe.index].finish = false;
2285 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2286 MVPP2_PRS_RI_PPPOE_MASK);
2287 mvpp2_prs_hw_write(priv, &pe);
2288
2289
2290 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2291 MVPP2_PE_LAST_FREE_TID);
2292 if (tid < 0)
2293 return tid;
2294
2295 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2296 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2297 pe.index = tid;
2298
2299 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2300
2301
2302 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2303 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2304 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2305 MVPP2_PRS_RI_L3_PROTO_MASK);
2306
2307 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2308 MVPP2_ETH_TYPE_LEN,
2309 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2310
2311
2312 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2313 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2314 priv->prs_shadow[pe.index].finish = true;
2315 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2316 MVPP2_PRS_RI_L3_PROTO_MASK);
2317 mvpp2_prs_hw_write(priv, &pe);
2318
2319
2320 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2321 MVPP2_PE_LAST_FREE_TID);
2322 if (tid < 0)
2323 return tid;
2324
2325 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2326 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2327 pe.index = tid;
2328
2329 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2330
2331
2332 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2333 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2334 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2335 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2336 MVPP2_PRS_RI_CPU_CODE_MASK |
2337 MVPP2_PRS_RI_UDF3_MASK);
2338
2339 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2340 MVPP2_ETH_TYPE_LEN,
2341 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2342
2343
2344 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2345 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2346 priv->prs_shadow[pe.index].finish = true;
2347 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2348 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2349 MVPP2_PRS_RI_CPU_CODE_MASK |
2350 MVPP2_PRS_RI_UDF3_MASK);
2351 mvpp2_prs_hw_write(priv, &pe);
2352
2353
2354 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2355 MVPP2_PE_LAST_FREE_TID);
2356 if (tid < 0)
2357 return tid;
2358
2359 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2360 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2361 pe.index = tid;
2362
2363 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2364 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2365 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2366 MVPP2_PRS_IPV4_HEAD_MASK |
2367 MVPP2_PRS_IPV4_IHL_MASK);
2368
2369 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2370 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2371 MVPP2_PRS_RI_L3_PROTO_MASK);
2372
2373 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2374 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2375
2376 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2377 MVPP2_ETH_TYPE_LEN,
2378 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2379
2380
2381 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2382 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2383 priv->prs_shadow[pe.index].finish = false;
2384 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2385 MVPP2_PRS_RI_L3_PROTO_MASK);
2386 mvpp2_prs_hw_write(priv, &pe);
2387
2388
2389 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2390 MVPP2_PE_LAST_FREE_TID);
2391 if (tid < 0)
2392 return tid;
2393
2394 pe.index = tid;
2395
2396
2397 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2398 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2399
2400 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2401 MVPP2_PRS_IPV4_HEAD,
2402 MVPP2_PRS_IPV4_HEAD_MASK);
2403
2404
2405 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2406 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2407 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2408 MVPP2_PRS_RI_L3_PROTO_MASK);
2409
2410
2411 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2412 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2413 priv->prs_shadow[pe.index].finish = false;
2414 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2415 MVPP2_PRS_RI_L3_PROTO_MASK);
2416 mvpp2_prs_hw_write(priv, &pe);
2417
2418
2419 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2420 MVPP2_PE_LAST_FREE_TID);
2421 if (tid < 0)
2422 return tid;
2423
2424 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2425 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2426 pe.index = tid;
2427
2428 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2429
2430
2431 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2432 MVPP2_MAX_L3_ADDR_SIZE,
2433 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2434 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2435 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2436 MVPP2_PRS_RI_L3_PROTO_MASK);
2437
2438 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2439 MVPP2_ETH_TYPE_LEN,
2440 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2441
2442 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2443 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2444 priv->prs_shadow[pe.index].finish = false;
2445 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2446 MVPP2_PRS_RI_L3_PROTO_MASK);
2447 mvpp2_prs_hw_write(priv, &pe);
2448
2449
2450 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2451 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2452 pe.index = MVPP2_PE_ETH_TYPE_UN;
2453
2454
2455 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2456
2457
2458 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2459 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2460 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2461 MVPP2_PRS_RI_L3_PROTO_MASK);
2462
2463 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2464 MVPP2_ETH_TYPE_LEN,
2465 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2466
2467
2468 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2469 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2470 priv->prs_shadow[pe.index].finish = true;
2471 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2472 MVPP2_PRS_RI_L3_PROTO_MASK);
2473 mvpp2_prs_hw_write(priv, &pe);
2474
2475 return 0;
2476}
2477
2478
2479
2480
2481
2482
2483
2484
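/* Configure vlan entries and detect up to 2 successive VLAN tags
 * (single and double VLAN)
 */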
2485static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2486{
2487 struct mvpp2_prs_entry pe;
2488 int err;
2489
 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX,
 sizeof(bool),
 GFP_KERNEL);
2493 if (!priv->prs_double_vlans)
2494 return -ENOMEM;
2495
2496
2497 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2498 MVPP2_PRS_PORT_MASK);
2499 if (err)
2500 return err;
2501
2502
2503 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2504 MVPP2_PRS_PORT_MASK);
2505 if (err)
2506 return err;
2507
2508
2509 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2510 MVPP2_PRS_PORT_MASK);
2511 if (err)
2512 return err;
2513
2514
2515 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2516 MVPP2_PRS_PORT_MASK);
2517 if (err)
2518 return err;
2519
2520
2521 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2522 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2523 pe.index = MVPP2_PE_VLAN_DBL;
2524
2525 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2526
2527 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2528 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2529 MVPP2_PRS_RI_VLAN_MASK);
2530
2531 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2532 MVPP2_PRS_DBL_VLAN_AI_BIT);
2533
2534 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2535
2536
2537 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2538 mvpp2_prs_hw_write(priv, &pe);
2539
2540
2541 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2542 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2543 pe.index = MVPP2_PE_VLAN_NONE;
2544
2545 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2546 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2547 MVPP2_PRS_RI_VLAN_MASK);
2548
2549
2550 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2551
2552
2553 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2554 mvpp2_prs_hw_write(priv, &pe);
2555
2556 return 0;
2557}
2558
2559
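/* Set entries for PPPoE ethertype */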
2560static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2561{
2562 struct mvpp2_prs_entry pe;
2563 int tid;
2564
2565
2566 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2567 MVPP2_PE_LAST_FREE_TID);
2568 if (tid < 0)
2569 return tid;
2570
2571 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2572 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2573 pe.index = tid;
2574
2575 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2576
2577 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2578 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2579 MVPP2_PRS_RI_L3_PROTO_MASK);
2580
2581 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2582 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2583
2584 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2585 MVPP2_ETH_TYPE_LEN,
2586 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2587
2588
2589 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2590 mvpp2_prs_hw_write(priv, &pe);
2591
2592
2593 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2594 MVPP2_PE_LAST_FREE_TID);
2595 if (tid < 0)
2596 return tid;
2597
2598 pe.index = tid;
2599
2600 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2601 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2602 MVPP2_PRS_IPV4_HEAD_MASK |
2603 MVPP2_PRS_IPV4_IHL_MASK);
2604
2605
2606 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2607 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2608 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2609 MVPP2_PRS_RI_L3_PROTO_MASK);
2610
2611
2612 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2613 mvpp2_prs_hw_write(priv, &pe);
2614
2615
2616 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2617 MVPP2_PE_LAST_FREE_TID);
2618 if (tid < 0)
2619 return tid;
2620
2621 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2622 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2623 pe.index = tid;
2624
2625 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2626
2627 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2628 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2629 MVPP2_PRS_RI_L3_PROTO_MASK);
2630
2631 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2632 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2633
2634 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2635 MVPP2_ETH_TYPE_LEN,
2636 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2637
2638
2639 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2640 mvpp2_prs_hw_write(priv, &pe);
2641
2642
2643 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2644 MVPP2_PE_LAST_FREE_TID);
2645 if (tid < 0)
2646 return tid;
2647
2648 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2649 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2650 pe.index = tid;
2651
2652 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2653 MVPP2_PRS_RI_L3_PROTO_MASK);
2654
2655
2656 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2657 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2658
2659 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2660 MVPP2_ETH_TYPE_LEN,
2661 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2662
2663
2664 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2665 mvpp2_prs_hw_write(priv, &pe);
2666
2667 return 0;
2668}
2669
2670
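/* Initialize entries for IPv4 */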
2671static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2672{
2673 struct mvpp2_prs_entry pe;
2674 int err;
2675
2676
2677 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2678 MVPP2_PRS_RI_L4_PROTO_MASK);
2679 if (err)
2680 return err;
2681
2682 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2683 MVPP2_PRS_RI_L4_PROTO_MASK);
2684 if (err)
2685 return err;
2686
2687 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2688 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2689 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2690 MVPP2_PRS_RI_CPU_CODE_MASK |
2691 MVPP2_PRS_RI_UDF3_MASK);
2692 if (err)
2693 return err;
2694
2695
2696 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2697 if (err)
2698 return err;
2699
2700
2701 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2702 if (err)
2703 return err;
2704
2705
2706 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2707 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2708 pe.index = MVPP2_PE_IP4_PROTO_UN;
2709
2710
2711 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2712 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2713
2714 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2715 sizeof(struct iphdr) - 4,
2716 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2717 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2718 MVPP2_PRS_IPV4_DIP_AI_BIT);
2719 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2720 MVPP2_PRS_RI_L4_PROTO_MASK);
2721
2722 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2723
2724 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2725
2726
2727 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2728 mvpp2_prs_hw_write(priv, &pe);
2729
2730
2731 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2732 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2733 pe.index = MVPP2_PE_IP4_ADDR_UN;
2734
2735
2736 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2737 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2738 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2739 MVPP2_PRS_RI_L3_ADDR_MASK);
2740
2741 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2742 MVPP2_PRS_IPV4_DIP_AI_BIT);
2743
2744 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2745
2746
2747 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2748 mvpp2_prs_hw_write(priv, &pe);
2749
2750 return 0;
2751}
2752
2753
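/* Initialize entries for IPv6 */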
2754static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2755{
2756 struct mvpp2_prs_entry pe;
2757 int tid, err;
2758
2759
2760 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2761 MVPP2_PRS_RI_L4_TCP,
2762 MVPP2_PRS_RI_L4_PROTO_MASK);
2763 if (err)
2764 return err;
2765
2766 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2767 MVPP2_PRS_RI_L4_UDP,
2768 MVPP2_PRS_RI_L4_PROTO_MASK);
2769 if (err)
2770 return err;
2771
2772 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2773 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2774 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2775 MVPP2_PRS_RI_CPU_CODE_MASK |
2776 MVPP2_PRS_RI_UDF3_MASK);
2777 if (err)
2778 return err;
2779
2780
2781
2782 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2783 MVPP2_PRS_RI_UDF7_IP6_LITE,
2784 MVPP2_PRS_RI_UDF7_MASK);
2785 if (err)
2786 return err;
2787
2788
2789 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2790 if (err)
2791 return err;
2792
2793
2794 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2795 MVPP2_PE_LAST_FREE_TID);
2796 if (tid < 0)
2797 return tid;
2798
2799 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2800 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2801 pe.index = tid;
2802
2803
2804 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2805 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2806 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2807 MVPP2_PRS_RI_DROP_MASK,
2808 MVPP2_PRS_RI_L3_PROTO_MASK |
2809 MVPP2_PRS_RI_DROP_MASK);
2810
2811 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2812 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2813 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2814
2815
2816 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2817 mvpp2_prs_hw_write(priv, &pe);
2818
2819
2820 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2821 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2822 pe.index = MVPP2_PE_IP6_PROTO_UN;
2823
2824
2825 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2826 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2827 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2828 MVPP2_PRS_RI_L4_PROTO_MASK);
2829
2830 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2831 sizeof(struct ipv6hdr) - 4,
2832 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2833
2834 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2835 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2836
2837 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2838
2839
2840 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2841 mvpp2_prs_hw_write(priv, &pe);
2842
2843
2844 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2845 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2846 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2847
2848
2849 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2850 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2851 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2852 MVPP2_PRS_RI_L4_PROTO_MASK);
2853
2854 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2855 MVPP2_PRS_IPV6_EXT_AI_BIT);
2856
2857 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2858
2859
2860 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2861 mvpp2_prs_hw_write(priv, &pe);
2862
2863
2864 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2865 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2866 pe.index = MVPP2_PE_IP6_ADDR_UN;
2867
2868
2869 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2870 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2871 MVPP2_PRS_RI_L3_ADDR_MASK);
2872 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2873 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2874
2875 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2876
2877 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2878
2879 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2880
2881
2882 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2883 mvpp2_prs_hw_write(priv, &pe);
2884
2885 return 0;
2886}
2887
2888
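/* Parser default initialization */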
2889static int mvpp2_prs_default_init(struct platform_device *pdev,
2890 struct mvpp2 *priv)
2891{
2892 int err, index, i;
2893
2894
2895 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2896
2897
2898 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2899 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2900 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2901 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2902
2903 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2904 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2905 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2906 }
2907
2908
2909 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2910 mvpp2_prs_hw_inv(priv, index);
2911
2912 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2913 sizeof(struct mvpp2_prs_shadow),
2914 GFP_KERNEL);
2915 if (!priv->prs_shadow)
2916 return -ENOMEM;
2917
2918
2919 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2920 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2921 MVPP2_PRS_PORT_LU_MAX, 0);
2922
2923 mvpp2_prs_def_flow_init(priv);
2924
2925 mvpp2_prs_mh_init(priv);
2926
2927 mvpp2_prs_mac_init(priv);
2928
2929 mvpp2_prs_dsa_init(priv);
2930
2931 err = mvpp2_prs_etype_init(priv);
2932 if (err)
2933 return err;
2934
2935 err = mvpp2_prs_vlan_init(pdev, priv);
2936 if (err)
2937 return err;
2938
2939 err = mvpp2_prs_pppoe_init(priv);
2940 if (err)
2941 return err;
2942
2943 err = mvpp2_prs_ip6_init(priv);
2944 if (err)
2945 return err;
2946
2947 err = mvpp2_prs_ip4_init(priv);
2948 if (err)
2949 return err;
2950
2951 return 0;
2952}
2953
2954
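/* Compare MAC DA with tcam entry data */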
2955static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2956 const u8 *da, unsigned char *mask)
2957{
2958 unsigned char tcam_byte, tcam_mask;
2959 int index;
2960
2961 for (index = 0; index < ETH_ALEN; index++) {
2962 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2963 if (tcam_mask != mask[index])
2964 return false;
2965
2966 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2967 return false;
2968 }
2969
2970 return true;
2971}
2972
2973
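/* Find tcam entry with matched pair <MAC DA, port> */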
2974static struct mvpp2_prs_entry *
2975mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2976 unsigned char *mask, int udf_type)
2977{
2978 struct mvpp2_prs_entry *pe;
2979 int tid;
2980
2981 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2982 if (!pe)
2983 return NULL;
2984 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2985
2986
2987 for (tid = MVPP2_PE_FIRST_FREE_TID;
2988 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2989 unsigned int entry_pmap;
2990
2991 if (!priv->prs_shadow[tid].valid ||
2992 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2993 (priv->prs_shadow[tid].udf != udf_type))
2994 continue;
2995
2996 pe->index = tid;
2997 mvpp2_prs_hw_read(priv, pe);
2998 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2999
3000 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3001 entry_pmap == pmap)
3002 return pe;
3003 }
3004 kfree(pe);
3005
3006 return NULL;
3007}
3008
3009
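/* Update parser's mac da entry */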
3010static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3011 const u8 *da, bool add)
3012{
3013 struct mvpp2_prs_entry *pe;
3014 unsigned int pmap, len, ri;
3015 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3016 int tid;
3017
3018
3019 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3020 MVPP2_PRS_UDF_MAC_DEF);
3021
3022
3023 if (!pe) {
3024 if (!add)
3025 return 0;
3026
3027
3028
3029 for (tid = MVPP2_PE_FIRST_FREE_TID;
3030 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3031 if (priv->prs_shadow[tid].valid &&
3032 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3033 (priv->prs_shadow[tid].udf ==
3034 MVPP2_PRS_UDF_MAC_RANGE))
3035 break;
3036
3037
3038 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3039 tid - 1);
3040 if (tid < 0)
3041 return tid;
3042
 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
 if (!pe)
 return -ENOMEM;
3046 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3047 pe->index = tid;
3048
3049
3050 mvpp2_prs_tcam_port_map_set(pe, 0);
3051 }
3052
3053
3054 mvpp2_prs_tcam_port_set(pe, port, add);
3055
3056
3057 pmap = mvpp2_prs_tcam_port_map_get(pe);
3058 if (pmap == 0) {
3059 if (add) {
3060 kfree(pe);
3061 return -1;
3062 }
3063 mvpp2_prs_hw_inv(priv, pe->index);
3064 priv->prs_shadow[pe->index].valid = false;
3065 kfree(pe);
3066 return 0;
3067 }
3068
3069
3070 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3071
3072
3073 len = ETH_ALEN;
3074 while (len--)
3075 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3076
3077
3078 if (is_broadcast_ether_addr(da))
3079 ri = MVPP2_PRS_RI_L2_BCAST;
3080 else if (is_multicast_ether_addr(da))
3081 ri = MVPP2_PRS_RI_L2_MCAST;
3082 else
3083 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3084
3085 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3086 MVPP2_PRS_RI_MAC_ME_MASK);
3087 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3088 MVPP2_PRS_RI_MAC_ME_MASK);
3089
3090
3091 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3092 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3093
3094
3095 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3096 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3097 mvpp2_prs_hw_write(priv, pe);
3098
3099 kfree(pe);
3100
3101 return 0;
3102}
3103
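/* Update the device MAC address in the parser tables */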
3104static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3105{
3106 struct mvpp2_port *port = netdev_priv(dev);
3107 int err;
3108
3109
3110 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3111 false);
3112 if (err)
3113 return err;
3114
3115
3116 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3117 if (err)
3118 return err;
3119
3120
3121 ether_addr_copy(dev->dev_addr, da);
3122
3123 return 0;
3124}
3125
3126
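/* Remove the port from all multicast MAC entries (except broadcast) */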
3127static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3128{
3129 struct mvpp2_prs_entry pe;
3130 int index, tid;
3131
3132 for (tid = MVPP2_PE_FIRST_FREE_TID;
3133 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3134 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3135
3136 if (!priv->prs_shadow[tid].valid ||
3137 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3138 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3139 continue;
3140
3141
3142 pe.index = tid;
3143 mvpp2_prs_hw_read(priv, &pe);
3144
3145
3146 for (index = 0; index < ETH_ALEN; index++)
3147 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3148 &da_mask[index]);
3149
 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
 /* Delete this entry */
 mvpp2_prs_mac_da_accept(priv, port, da, false);
3153 }
3154}
3155
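/* Configure the port's DSA/EDSA tag parsing mode */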
3156static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3157{
3158 switch (type) {
3159 case MVPP2_TAG_TYPE_EDSA:
3160
3161 mvpp2_prs_dsa_tag_set(priv, port, true,
3162 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3163 mvpp2_prs_dsa_tag_set(priv, port, true,
3164 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3165
3166 mvpp2_prs_dsa_tag_set(priv, port, false,
3167 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3168 mvpp2_prs_dsa_tag_set(priv, port, false,
3169 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3170 break;
3171
3172 case MVPP2_TAG_TYPE_DSA:
3173
3174 mvpp2_prs_dsa_tag_set(priv, port, true,
3175 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3176 mvpp2_prs_dsa_tag_set(priv, port, true,
3177 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3178
3179 mvpp2_prs_dsa_tag_set(priv, port, false,
3180 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3181 mvpp2_prs_dsa_tag_set(priv, port, false,
3182 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3183 break;
3184
3185 case MVPP2_TAG_TYPE_MH:
3186 case MVPP2_TAG_TYPE_NONE:
3187
3188 mvpp2_prs_dsa_tag_set(priv, port, false,
3189 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3190 mvpp2_prs_dsa_tag_set(priv, port, false,
3191 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3192 mvpp2_prs_dsa_tag_set(priv, port, false,
3193 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3194 mvpp2_prs_dsa_tag_set(priv, port, false,
3195 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3196 break;
3197
3198 default:
3199 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3200 return -EINVAL;
3201 }
3202
3203 return 0;
3204}
3205
3206
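/* Set prs flow for the port */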
3207static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3208{
3209 struct mvpp2_prs_entry *pe;
3210 int tid;
3211
3212 pe = mvpp2_prs_flow_find(port->priv, port->id);
3213
3214
3215 if (!pe) {
3216
3217 tid = mvpp2_prs_tcam_first_free(port->priv,
3218 MVPP2_PE_LAST_FREE_TID,
3219 MVPP2_PE_FIRST_FREE_TID);
3220 if (tid < 0)
3221 return tid;
3222
3223 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3224 if (!pe)
3225 return -ENOMEM;
3226
3227 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3228 pe->index = tid;
3229
3230
3231 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3232 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3233
3234
3235 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3236 }
3237
3238 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3239 mvpp2_prs_hw_write(port->priv, pe);
3240 kfree(pe);
3241
3242 return 0;
3243}
3244
3245
3246
3247
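/* Update classification flow table registers */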
3248static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3249 struct mvpp2_cls_flow_entry *fe)
3250{
3251 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3252 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3253 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3254 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3255}
3256
3257
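/* Update classification lookup table register */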
3258static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3259 struct mvpp2_cls_lookup_entry *le)
3260{
3261 u32 val;
3262
3263 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3264 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3265 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3266}
3267
3268
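/* Classifier default initialization */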
3269static void mvpp2_cls_init(struct mvpp2 *priv)
3270{
3271 struct mvpp2_cls_lookup_entry le;
3272 struct mvpp2_cls_flow_entry fe;
3273 int index;
3274
3275
3276 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3277
3278
 memset(&fe.data, 0, sizeof(fe.data));
3280 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3281 fe.index = index;
3282 mvpp2_cls_flow_write(priv, &fe);
3283 }
3284
3285
3286 le.data = 0;
3287 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3288 le.lkpid = index;
3289 le.way = 0;
3290 mvpp2_cls_lookup_write(priv, &le);
3291
3292 le.way = 1;
3293 mvpp2_cls_lookup_write(priv, &le);
3294 }
3295}
3296
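/* Configure the classifier lookup table entry for the given port */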
3297static void mvpp2_cls_port_config(struct mvpp2_port *port)
3298{
3299 struct mvpp2_cls_lookup_entry le;
3300 u32 val;
3301
3302
3303 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3304 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3305 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3306
3307
3308
3309
3310 le.lkpid = port->id;
3311 le.way = 0;
3312 le.data = 0;
3313
3314
3315 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3316 le.data |= port->first_rxq;
3317
3318
3319 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3320
3321
3322 mvpp2_cls_lookup_write(port->priv, &le);
3323}
3324
3325
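/* Set CPU queue number for oversize packets */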
3326static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3327{
3328 u32 val;
3329
3330 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3331 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3332
3333 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3334 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3335
3336 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3337 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3338 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3339}
3340
3341
3342
3343
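/* Create pool */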
3344static int mvpp2_bm_pool_create(struct platform_device *pdev,
3345 struct mvpp2 *priv,
3346 struct mvpp2_bm_pool *bm_pool, int size)
3347{
3348 int size_bytes;
3349 u32 val;
3350
3351 size_bytes = sizeof(u32) * size;
3352 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3353 &bm_pool->phys_addr,
3354 GFP_KERNEL);
3355 if (!bm_pool->virt_addr)
3356 return -ENOMEM;
3357
3358 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3359 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3360 bm_pool->phys_addr);
3361 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3362 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3363 return -ENOMEM;
3364 }
3365
3366 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3367 bm_pool->phys_addr);
3368 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3369
3370 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3371 val |= MVPP2_BM_START_MASK;
3372 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3373
3374 bm_pool->type = MVPP2_BM_FREE;
3375 bm_pool->size = size;
3376 bm_pool->pkt_size = 0;
3377 bm_pool->buf_num = 0;
3378 atomic_set(&bm_pool->in_use, 0);
3379 spin_lock_init(&bm_pool->lock);
3380
3381 return 0;
3382}
3383
3384
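/* Set pool buffer size */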
3385static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3386 struct mvpp2_bm_pool *bm_pool,
3387 int buf_size)
3388{
3389 u32 val;
3390
3391 bm_pool->buf_size = buf_size;
3392
3393 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3394 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3395}
3396
3397
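/* Free all buffers from the pool */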
3398static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
3399{
3400 int i;
3401
3402 for (i = 0; i < bm_pool->buf_num; i++) {
3403 u32 vaddr;
3404
3405
3406 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3407 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3408 if (!vaddr)
3409 break;
3410 dev_kfree_skb_any((struct sk_buff *)vaddr);
3411 }
3412
3413
3414 bm_pool->buf_num -= i;
3415}
3416
3417
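/* Cleanup pool */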
3418static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3419 struct mvpp2 *priv,
3420 struct mvpp2_bm_pool *bm_pool)
3421{
3422 u32 val;
3423
3424 mvpp2_bm_bufs_free(priv, bm_pool);
3425 if (bm_pool->buf_num) {
3426 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3427 return 0;
3428 }
3429
3430 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3431 val |= MVPP2_BM_STOP_MASK;
3432 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3433
3434 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3435 bm_pool->virt_addr,
3436 bm_pool->phys_addr);
3437 return 0;
3438}
3439
3440static int mvpp2_bm_pools_init(struct platform_device *pdev,
3441 struct mvpp2 *priv)
3442{
3443 int i, err, size;
3444 struct mvpp2_bm_pool *bm_pool;
3445
3446
3447 size = MVPP2_BM_POOL_SIZE_MAX;
3448 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3449 bm_pool = &priv->bm_pools[i];
3450 bm_pool->id = i;
3451 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3452 if (err)
3453 goto err_unroll_pools;
3454 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3455 }
3456 return 0;
3457
3458err_unroll_pools:
3459 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3460 for (i = i - 1; i >= 0; i--)
3461 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3462 return err;
3463}
3464
3465static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3466{
3467 int i, err;
3468
3469 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3470
3471 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3472
3473 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3474 }
3475
3476
3477 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3478 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3479 if (!priv->bm_pools)
3480 return -ENOMEM;
3481
3482 err = mvpp2_bm_pools_init(pdev, priv);
3483 if (err < 0)
3484 return err;
3485 return 0;
3486}
3487
3488
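/* Attach long pool to rxq */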
3489static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3490 int lrxq, int long_pool)
3491{
3492 u32 val;
3493 int prxq;
3494
3495
3496 prxq = port->rxqs[lrxq]->id;
3497
3498 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3499 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3500 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3501 MVPP2_RXQ_POOL_LONG_MASK);
3502
3503 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3504}
3505
3506
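/* Attach short pool to rxq */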
3507static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3508 int lrxq, int short_pool)
3509{
3510 u32 val;
3511 int prxq;
3512
3513
3514 prxq = port->rxqs[lrxq]->id;
3515
3516 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3517 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3518 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3519 MVPP2_RXQ_POOL_SHORT_MASK);
3520
3521 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3522}
3523
3524
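/* Allocate skb for BM pool and map it for DMA */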
3525static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3526 struct mvpp2_bm_pool *bm_pool,
3527 dma_addr_t *buf_phys_addr,
3528 gfp_t gfp_mask)
3529{
3530 struct sk_buff *skb;
3531 dma_addr_t phys_addr;
3532
3533 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3534 if (!skb)
3535 return NULL;
3536
3537 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3538 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3539 DMA_FROM_DEVICE);
3540 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3541 dev_kfree_skb_any(skb);
3542 return NULL;
3543 }
3544 *buf_phys_addr = phys_addr;
3545
3546 return skb;
3547}
3548
3549
3550static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3551{
3552 u32 bm;
3553
3554 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3555 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3556
3557 return bm;
3558}
3559
3560
3561static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3562{
3563 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3564}
3565
3566
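/* Release buffer to BM */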
3567static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3568 u32 buf_phys_addr, u32 buf_virt_addr)
3569{
3570 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3571 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3572}
3573
3574
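/* Release multicast buffer */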
3575static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3576 u32 buf_phys_addr, u32 buf_virt_addr,
3577 int mc_id)
3578{
3579 u32 val = 0;
3580
3581 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3582 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3583
3584 mvpp2_bm_pool_put(port, pool,
3585 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3586 buf_virt_addr);
3587}
3588
3589
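/* Refill BM pool */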
3590static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3591 u32 phys_addr, u32 cookie)
3592{
3593 int pool = mvpp2_bm_cookie_pool_get(bm);
3594
3595 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3596}
3597
3598
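/* Allocate buffers for the pool */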
3599static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3600 struct mvpp2_bm_pool *bm_pool, int buf_num)
3601{
3602 struct sk_buff *skb;
3603 int i, buf_size, total_size;
3604 u32 bm;
3605 dma_addr_t phys_addr;
3606
3607 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3608 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3609
3610 if (buf_num < 0 ||
3611 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3612 netdev_err(port->dev,
3613 "cannot allocate %d buffers for pool %d\n",
3614 buf_num, bm_pool->id);
3615 return 0;
3616 }
3617
3618 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3619 for (i = 0; i < buf_num; i++) {
3620 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3621 if (!skb)
3622 break;
3623
3624 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3625 }
3626
3627
3628 bm_pool->buf_num += i;
3629 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3630
3631 netdev_dbg(port->dev,
3632 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3633 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3634 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3635
3636 netdev_dbg(port->dev,
3637 "%s pool %d: %d of %d buffers added\n",
3638 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3639 bm_pool->id, i, buf_num);
3640 return i;
3641}
3642
3643
3644
3645
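/* Notify the driver that BM pool is being used as specific type and return
 * the pool pointer on success
 */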
3646static struct mvpp2_bm_pool *
3647mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3648 int pkt_size)
3649{
3650 unsigned long flags = 0;
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3652 int num;
3653
3654 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3655 netdev_err(port->dev, "mixing pool types is forbidden\n");
3656 return NULL;
3657 }
3658
3659 spin_lock_irqsave(&new_pool->lock, flags);
3660
3661 if (new_pool->type == MVPP2_BM_FREE)
3662 new_pool->type = type;
3663
3664
3665
3666
3667 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3668 (new_pool->pkt_size == 0)) {
3669 int pkts_num;
3670
3671
3672
3673
3674 pkts_num = new_pool->buf_num;
3675 if (pkts_num == 0)
3676 pkts_num = type == MVPP2_BM_SWF_LONG ?
3677 MVPP2_BM_LONG_BUF_NUM :
3678 MVPP2_BM_SHORT_BUF_NUM;
3679 else
3680 mvpp2_bm_bufs_free(port->priv, new_pool);
3681
3682 new_pool->pkt_size = pkt_size;
3683
3684
3685 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3686 if (num != pkts_num) {
3687 WARN(1, "pool %d: %d of %d allocated\n",
3688 new_pool->id, num, pkts_num);
3689
3690 spin_unlock_irqrestore(&new_pool->lock, flags);
3691 return NULL;
3692 }
3693 }
3694
3695 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3696 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3697
3698 spin_unlock_irqrestore(&new_pool->lock, flags);
3699
3700 return new_pool;
3701}
3702
3703
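/* Initialize pools for swf */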
3704static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3705{
3706 unsigned long flags = 0;
3707 int rxq;
3708
3709 if (!port->pool_long) {
3710 port->pool_long =
3711 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3712 MVPP2_BM_SWF_LONG,
3713 port->pkt_size);
3714 if (!port->pool_long)
3715 return -ENOMEM;
3716
3717 spin_lock_irqsave(&port->pool_long->lock, flags);
3718 port->pool_long->port_map |= (1 << port->id);
3719 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3720
3721 for (rxq = 0; rxq < rxq_number; rxq++)
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3723 }
3724
3725 if (!port->pool_short) {
3726 port->pool_short =
3727 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3728 MVPP2_BM_SWF_SHORT,
3729 MVPP2_BM_SHORT_PKT_SIZE);
3730 if (!port->pool_short)
3731 return -ENOMEM;
3732
3733 spin_lock_irqsave(&port->pool_short->lock, flags);
3734 port->pool_short->port_map |= (1 << port->id);
3735 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3736
3737 for (rxq = 0; rxq < rxq_number; rxq++)
3738 mvpp2_rxq_short_pool_set(port, rxq,
3739 port->pool_short->id);
3740 }
3741
3742 return 0;
3743}
3744
3745static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3746{
3747 struct mvpp2_port *port = netdev_priv(dev);
3748 struct mvpp2_bm_pool *port_pool = port->pool_long;
3749 int num, pkts_num = port_pool->buf_num;
3750 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3751
3752
3753 mvpp2_bm_bufs_free(port->priv, port_pool);
3754 if (port_pool->buf_num) {
3755 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3756 return -EIO;
3757 }
3758
3759 port_pool->pkt_size = pkt_size;
3760 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3761 if (num != pkts_num) {
3762 WARN(1, "pool %d: %d of %d allocated\n",
3763 port_pool->id, num, pkts_num);
3764 return -EIO;
3765 }
3766
3767 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3768 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3769 dev->mtu = mtu;
3770 netdev_update_features(dev);
3771 return 0;
3772}
3773
3774static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3775{
3776 int cpu, cpu_mask = 0;
3777
3778 for_each_present_cpu(cpu)
3779 cpu_mask |= 1 << cpu;
3780 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3781 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3782}
3783
3784static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3785{
3786 int cpu, cpu_mask = 0;
3787
3788 for_each_present_cpu(cpu)
3789 cpu_mask |= 1 << cpu;
3790 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3791 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3792}
3793
3794
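/* Mask the current CPU's Rx/Tx interrupts */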
3795static void mvpp2_interrupts_mask(void *arg)
3796{
3797 struct mvpp2_port *port = arg;
3798
3799 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3800}
3801
3802
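/* Unmask the current CPU's Rx/Tx interrupts */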
3803static void mvpp2_interrupts_unmask(void *arg)
3804{
3805 struct mvpp2_port *port = arg;
3806
3807 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3808 (MVPP2_CAUSE_MISC_SUM_MASK |
3809 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3810 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3811}
3812
3813
3814
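/* Port configuration routines */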
3815static void mvpp2_port_mii_set(struct mvpp2_port *port)
3816{
3817 u32 val;
3818
3819 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3820
3821 switch (port->phy_interface) {
3822 case PHY_INTERFACE_MODE_SGMII:
3823 val |= MVPP2_GMAC_INBAND_AN_MASK;
3824 break;
3825 case PHY_INTERFACE_MODE_RGMII:
3826 val |= MVPP2_GMAC_PORT_RGMII_MASK;
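 /* fall through */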
3827 default:
3828 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3829 }
3830
3831 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3832}
3833
3834static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3835{
3836 u32 val;
3837
3838 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3839 val |= MVPP2_GMAC_FC_ADV_EN;
3840 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3841}
3842
3843static void mvpp2_port_enable(struct mvpp2_port *port)
3844{
3845 u32 val;
3846
3847 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3848 val |= MVPP2_GMAC_PORT_EN_MASK;
3849 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3850 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3851}
3852
3853static void mvpp2_port_disable(struct mvpp2_port *port)
3854{
3855 u32 val;
3856
3857 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3858 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3859 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3860}
3861
3862
3863static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3864{
3865 u32 val;
3866
3867 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3868 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3869 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3870}
3871
3872
3873static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3874{
3875 u32 val;
3876
3877 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3878
3879 if (port->speed == 1000)
3880 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3881 else
3882 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3883
3884 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3885 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3886 else
3887 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3888
3889 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3890}
3891
3892static void mvpp2_port_reset(struct mvpp2_port *port)
3893{
3894 u32 val;
3895
3896 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3897 ~MVPP2_GMAC_PORT_RESET_MASK;
3898 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3899
3900 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3901 MVPP2_GMAC_PORT_RESET_MASK)
3902 continue;
3903}
3904
3905
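/* Change maximum receive size of the port */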
3906static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3907{
3908 u32 val;
3909
3910 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3911 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3912 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3913 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3914 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3915}
3916
3917
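/* Set defaults to the MVPP2 port */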
3918static void mvpp2_defaults_set(struct mvpp2_port *port)
3919{
3920 int tx_port_num, val, queue, ptxq, lrxq;
3921
3922
3923 if (port->flags & MVPP2_F_LOOPBACK)
3924 mvpp2_port_loopback_set(port);
3925
3926
3927 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3928 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
 /* Min. TX threshold must be less than minimal packet length */
 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3931 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3932
3933
3934 tx_port_num = mvpp2_egress_port(port);
3935 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3936 tx_port_num);
3937 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3938
3939
3940 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3941 ptxq = mvpp2_txq_phys(port->id, queue);
3942 mvpp2_write(port->priv,
3943 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3944 }
3945
3946
3947
3948
3949 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3950 port->priv->tclk / USEC_PER_SEC);
3951 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3952 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3953 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3954 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3955 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3956 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3957 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3958
3959
3960 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3961 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3962 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3963
3964
3965 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3966 queue = port->rxqs[lrxq]->id;
3967 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3968 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3969 MVPP2_SNOOP_BUF_HDR_MASK;
3970 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3971 }
3972
3973
3974 mvpp2_interrupts_disable(port);
3975}
3976
3977
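/* Enable reception on all the port's Rx queues */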
3978static void mvpp2_ingress_enable(struct mvpp2_port *port)
3979{
3980 u32 val;
3981 int lrxq, queue;
3982
3983 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3984 queue = port->rxqs[lrxq]->id;
3985 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3986 val &= ~MVPP2_RXQ_DISABLE_MASK;
3987 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3988 }
3989}
3990
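/* Disable reception on all the port's Rx queues */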
3991static void mvpp2_ingress_disable(struct mvpp2_port *port)
3992{
3993 u32 val;
3994 int lrxq, queue;
3995
3996 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3997 queue = port->rxqs[lrxq]->id;
3998 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3999 val |= MVPP2_RXQ_DISABLE_MASK;
4000 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4001 }
4002}
4003
4004
4005
4006
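/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */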
4007static void mvpp2_egress_enable(struct mvpp2_port *port)
4008{
4009 u32 qmap;
4010 int queue;
4011 int tx_port_num = mvpp2_egress_port(port);
4012
4013
4014 qmap = 0;
4015 for (queue = 0; queue < txq_number; queue++) {
4016 struct mvpp2_tx_queue *txq = port->txqs[queue];
4017
4018 if (txq->descs != NULL)
4019 qmap |= (1 << queue);
4020 }
4021
4022 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4023 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4024}
4025
4026
4027
4028
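/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */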
4029static void mvpp2_egress_disable(struct mvpp2_port *port)
4030{
4031 u32 reg_data;
4032 int delay;
4033 int tx_port_num = mvpp2_egress_port(port);
4034
4035
4036 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4037 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4038 MVPP2_TXP_SCHED_ENQ_MASK;
4039 if (reg_data != 0)
4040 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4041 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4042
4043
4044 delay = 0;
4045 do {
4046 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4047 netdev_warn(port->dev,
4048 "Tx stop timed out, status=0x%08x\n",
4049 reg_data);
4050 break;
4051 }
4052 mdelay(1);
4053 delay++;
4054
4055
4056
4057
4058 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4059 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4060}
4061
4062
4063
4064
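/* Get number of Rx descriptors occupied by received packets */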
4065static inline int
4066mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4067{
4068 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4069
4070 return val & MVPP2_RXQ_OCCUPIED_MASK;
4071}
4072
4073
4074
4075
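/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots
 */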
4076static inline void
4077mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4078 int used_count, int free_count)
4079{
 /* Decrement the number of used descriptors and increment
 * the number of free descriptors
 */
4083 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4084
4085 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4086}
4087
4088
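/* Get pointer to next RX descriptor to be processed by SW */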
4089static inline struct mvpp2_rx_desc *
4090mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4091{
4092 int rx_desc = rxq->next_desc_to_proc;
4093
4094 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4095 prefetch(rxq->descs + rxq->next_desc_to_proc);
4096 return rxq->descs + rx_desc;
4097}
4098
4099
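/* Set rx queue offset */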
4100static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4101 int prxq, int offset)
4102{
4103 u32 val;
4104
 /* Convert offset from bytes to units of 32 bytes */
4106 offset = offset >> 5;
4107
4108 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4109 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4110
4111
4112 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4113 MVPP2_RXQ_PACKET_OFFSET_MASK);
4114
4115 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4116}
4117
4118
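/* Obtain BM cookie information from Rx descriptor */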
4119static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4120{
4121 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4122 MVPP2_RXD_BM_POOL_ID_OFFS;
4123 int cpu = smp_processor_id();
4124
4125 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4126 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4127}
4128
4129
4130
4131
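/* Get number of Tx descriptors waiting to be transmitted by HW */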
4132static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4133 struct mvpp2_tx_queue *txq)
4134{
4135 u32 val;
4136
4137 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4138 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4139
4140 return val & MVPP2_TXQ_PENDING_MASK;
4141}
4142
4143
4144static struct mvpp2_tx_desc *
4145mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4146{
4147 int tx_desc = txq->next_desc_to_proc;
4148
4149 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4150 return txq->descs + tx_desc;
4151}
4152
4153
4154static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4155{
4156
4157 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4158}
4159
4160
4161
4162/* Check if there are enough free descriptors in the aggregated Tx queue;
4163 * if not, refresh the occupancy counter from hardware and check again. */
4164static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4165 struct mvpp2_tx_queue *aggr_txq, int num)
4166{
4167 if ((aggr_txq->count + num) > aggr_txq->size) {
4168
4169 int cpu = smp_processor_id();
4170 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4171
4172 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4173 }
4174
4175 if ((aggr_txq->count + num) > aggr_txq->size)
4176 return -ENOMEM;
4177
4178 return 0;
4179}
4180
4181
4182static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4183 struct mvpp2_tx_queue *txq, int num)
4184{
4185 u32 val;
4186
4187 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4188 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4189
4190 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4191
4192 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4193}
4194
4195
4196/* Check if there are enough reserved descriptors for transmission.
4197 * If not, request a chunk of reserved descriptors and check again. */
4198static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4199 struct mvpp2_tx_queue *txq,
4200 struct mvpp2_txq_pcpu *txq_pcpu,
4201 int num)
4202{
4203 int req, cpu, desc_count;
4204
4205 if (txq_pcpu->reserved_num >= num)
4206 return 0;
4207
4208 /* Not enough descriptors reserved: compute the total number of
4209 * descriptors already used or reserved by all CPUs, so that the
4210 * per-CPU chunks cannot overcommit the queue, then request more.
4211 */
4212 desc_count = 0;
4213
4214 for_each_present_cpu(cpu) {
4215 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4216
4217 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4218 desc_count += txq_pcpu_aux->count;
4219 desc_count += txq_pcpu_aux->reserved_num;
4220 }
4221
4222 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4223 desc_count += req;
4224
4225 if (desc_count >
4226 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4227 return -ENOMEM;
4228
4229 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4230
4231
4232 if (txq_pcpu->reserved_num < num)
4233 return -ENOMEM;
4234 return 0;
4235}
4236
4237
4238/* Release the last allocated Tx descriptor. Useful to handle DMA
4239 * mapping failures in the Tx path. */
4240static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4241{
4242 if (txq->next_desc_to_proc == 0)
4243 txq->next_desc_to_proc = txq->last_desc - 1;
4244 else
4245 txq->next_desc_to_proc--;
4246}
4247
4248
4249static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4250 int ip_hdr_len, int l4_proto)
4251{
4252 u32 command;
4253 /* Assemble the checksum command: the L3 offset, IP header length,
4254 * L3 type and L4 type fields are only meaningful when hardware
4255 * checksum generation is requested.
4256 */
4257 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4258 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4259 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4260
4261 if (l3_proto == swab16(ETH_P_IP)) {
4262 command &= ~MVPP2_TXD_IP_CSUM_DISABLE;
4263 command &= ~MVPP2_TXD_L3_IP6;
4264 } else {
4265 command |= MVPP2_TXD_L3_IP6;
4266 }
4267
4268 if (l4_proto == IPPROTO_TCP) {
4269 command &= ~MVPP2_TXD_L4_UDP;
4270 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
4271 } else if (l4_proto == IPPROTO_UDP) {
4272 command |= MVPP2_TXD_L4_UDP;
4273 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
4274 } else {
4275 command |= MVPP2_TXD_L4_CSUM_NOT;
4276 }
4277
4278 return command;
4279}
4280
4281
4282
4283/* Get the number of descriptors sent on this Tx queue; reading the
4284 * register also clears the hardware counter. */
4285static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4286 struct mvpp2_tx_queue *txq)
4287{
4288 u32 val;
4289
4290
4291 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4292
4293 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4294 MVPP2_TRANSMITTED_COUNT_OFFSET;
4295}
4296
4297static void mvpp2_txq_sent_counter_clear(void *arg)
4298{
4299 struct mvpp2_port *port = arg;
4300 int queue;
4301
4302 for (queue = 0; queue < txq_number; queue++) {
4303 int id = port->txqs[queue]->id;
4304
4305 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4306 }
4307}
4308
4309
4310static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4311{
4312 u32 val, size, mtu;
4313 int txq, tx_port_num;
4314
4315 mtu = port->pkt_size * 8;
4316 if (mtu > MVPP2_TXP_MTU_MAX)
4317 mtu = MVPP2_TXP_MTU_MAX;
4318
4319
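 /* The egress scheduler expresses the port MTU in bits (pkt_size * 8,
 * clamped above); it is then tripled here, which appears to be a
 * workaround for the way the hardware updates its token buckets.
 */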
4320 mtu = 3 * mtu;
4321
4322
4323 tx_port_num = mvpp2_egress_port(port);
4324 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4325
4326
4327 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4328 val &= ~MVPP2_TXP_MTU_MAX;
4329 val |= mtu;
4330 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4331
4332
4333 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4334 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4335 if (size < mtu) {
4336 size = mtu;
4337 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4338 val |= size;
4339 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4340 }
4341
4342 for (txq = 0; txq < txq_number; txq++) {
4343 val = mvpp2_read(port->priv,
4344 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4345 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4346
4347 if (size < mtu) {
4348 size = mtu;
4349 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4350 val |= size;
4351 mvpp2_write(port->priv,
4352 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4353 val);
4354 }
4355 }
4356}
4357
4358
4359
4360
4361static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4362 struct mvpp2_rx_queue *rxq, u32 pkts)
4363{
4364 u32 val;
4365
4366 val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
4367 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4368 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
4369
4370 rxq->pkts_coal = pkts;
4371}
4372
4373
4374static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4375 struct mvpp2_rx_queue *rxq, u32 usec)
4376{
4377 u32 val;
4378
4379 val = (port->priv->tclk / USEC_PER_SEC) * usec;
4380 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4381
4382 rxq->time_coal = usec;
4383}
4384
4385
4386static void mvpp2_tx_done_pkts_coal_set(void *arg)
4387{
4388 struct mvpp2_port *port = arg;
4389 int queue;
4390 u32 val;
4391
4392 for (queue = 0; queue < txq_number; queue++) {
4393 struct mvpp2_tx_queue *txq = port->txqs[queue];
4394
4395 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4396 MVPP2_TRANSMITTED_THRESH_MASK;
4397 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4398 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4399 }
4400}
4401
4402
4403static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4404 struct mvpp2_tx_queue *txq,
4405 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4406{
4407 int i;
4408
4409 for (i = 0; i < num; i++) {
4410 struct mvpp2_tx_desc *tx_desc = txq->descs +
4411 txq_pcpu->txq_get_index;
4412 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4413
4414 mvpp2_txq_inc_get(txq_pcpu);
4415
4416 if (!skb)
4417 continue;
4418
4419 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
4420 tx_desc->data_size, DMA_TO_DEVICE);
4421 dev_kfree_skb_any(skb);
4422 }
4423}
4424
4425static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4426 u32 cause)
4427{
4428 int queue = fls(cause) - 1;
4429
4430 return port->rxqs[queue];
4431}
4432
4433static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4434 u32 cause)
4435{
4436 int queue = fls(cause >> 16) - 1;
4437
4438 return port->txqs[queue];
4439}
4440
4441
4442static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4443 struct mvpp2_txq_pcpu *txq_pcpu)
4444{
4445 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4446 int tx_done;
4447
4448 if (txq_pcpu->cpu != smp_processor_id())
4449 netdev_err(port->dev, "Tx done processing ran on the wrong CPU\n");
4450
4451 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4452 if (!tx_done)
4453 return;
4454 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4455
4456 txq_pcpu->count -= tx_done;
4457
4458 if (netif_tx_queue_stopped(nq))
4459 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4460 netif_tx_wake_queue(nq);
4461}
4462
4463
4464
4465
4466static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4467 struct mvpp2_tx_queue *aggr_txq,
4468 int desc_num, int cpu,
4469 struct mvpp2 *priv)
4470{
4471
4472 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4473 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4474 &aggr_txq->descs_phys, GFP_KERNEL);
4475 if (!aggr_txq->descs)
4476 return -ENOMEM;
4477
4478
4479 BUG_ON(aggr_txq->descs !=
4480 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4481
4482 aggr_txq->last_desc = aggr_txq->size - 1;
4483
4484
4485 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4486 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4487
4488
4489
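 /* Set the aggregated Tx queue descriptor ring address and size for
 * this CPU (per-CPU indirect registers).
 */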
4490 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4491 aggr_txq->descs_phys);
4492 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4493
4494 return 0;
4495}
4496
4497
4498static int mvpp2_rxq_init(struct mvpp2_port *port,
4499 struct mvpp2_rx_queue *rxq)
4500
4501{
4502 rxq->size = port->rx_ring_size;
4503
4504
4505 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4506 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4507 &rxq->descs_phys, GFP_KERNEL);
4508 if (!rxq->descs)
4509 return -ENOMEM;
4510
4511 BUG_ON(rxq->descs !=
4512 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4513
4514 rxq->last_desc = rxq->size - 1;
4515
4516
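 /* Zero the occupied and non-occupied descriptor counters */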
4517 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4518
4519
4520 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4521 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4522 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4523 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4524
4525
4526 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4527
4528
4529 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
4530 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
4531
4532
4533 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4534
4535 return 0;
4536}
4537
4538
4539static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4540 struct mvpp2_rx_queue *rxq)
4541{
4542 int rx_received, i;
4543
4544 rx_received = mvpp2_rxq_received(port, rxq->id);
4545 if (!rx_received)
4546 return;
4547
4548 for (i = 0; i < rx_received; i++) {
4549 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4550 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4551
4552 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4553 rx_desc->buf_cookie);
4554 }
4555 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4556}
4557
4558
4559static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4560 struct mvpp2_rx_queue *rxq)
4561{
4562 mvpp2_rxq_drop_pkts(port, rxq);
4563
4564 if (rxq->descs)
4565 dma_free_coherent(port->dev->dev.parent,
4566 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4567 rxq->descs,
4568 rxq->descs_phys);
4569
4570 rxq->descs = NULL;
4571 rxq->last_desc = 0;
4572 rxq->next_desc_to_proc = 0;
4573 rxq->descs_phys = 0;
4574
4575
4576
4577
4578 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4579 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4580 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4581 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4582}
4583
4584
4585static int mvpp2_txq_init(struct mvpp2_port *port,
4586 struct mvpp2_tx_queue *txq)
4587{
4588 u32 val;
4589 int cpu, desc, desc_per_txq, tx_port_num;
4590 struct mvpp2_txq_pcpu *txq_pcpu;
4591
4592 txq->size = port->tx_ring_size;
4593
4594
4595 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4596 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4597 &txq->descs_phys, GFP_KERNEL);
4598 if (!txq->descs)
4599 return -ENOMEM;
4600
4601
4602 BUG_ON(txq->descs !=
4603 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4604
4605 txq->last_desc = txq->size - 1;
4606
4607
4608 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4609 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4610 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4611 MVPP2_TXQ_DESC_SIZE_MASK);
4612 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4613 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4614 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4615 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4616 val &= ~MVPP2_TXQ_PENDING_MASK;
4617 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4618
4619 /* Calculate the base address in the prefetch buffer: 16
4620 * descriptors are reserved for every Tx queue, and the queues
4621 * of each port are laid out contiguously, so the slot depends
4622 * on the port id and on the queue's logical id.
4623 */
4624 desc_per_txq = 16;
4625 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4626 (txq->log_id * desc_per_txq);
4627
4628 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4629 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4630 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4631
4632
4633 tx_port_num = mvpp2_egress_port(port);
4634 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4635
4636 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4637 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4638 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4639 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4640 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4641
4642 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4643 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4644 val);
4645
4646 for_each_present_cpu(cpu) {
4647 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4648 txq_pcpu->size = txq->size;
4649 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4650 sizeof(*txq_pcpu->tx_skb),
4651 GFP_KERNEL);
4652 if (!txq_pcpu->tx_skb) {
4653 dma_free_coherent(port->dev->dev.parent,
4654 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4655 txq->descs, txq->descs_phys);
4656 return -ENOMEM;
4657 }
4658
4659 txq_pcpu->count = 0;
4660 txq_pcpu->reserved_num = 0;
4661 txq_pcpu->txq_put_index = 0;
4662 txq_pcpu->txq_get_index = 0;
4663 }
4664
4665 return 0;
4666}
4667
4668
4669static void mvpp2_txq_deinit(struct mvpp2_port *port,
4670 struct mvpp2_tx_queue *txq)
4671{
4672 struct mvpp2_txq_pcpu *txq_pcpu;
4673 int cpu;
4674
4675 for_each_present_cpu(cpu) {
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4677 kfree(txq_pcpu->tx_skb);
4678 }
4679
4680 if (txq->descs)
4681 dma_free_coherent(port->dev->dev.parent,
4682 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4683 txq->descs, txq->descs_phys);
4684
4685 txq->descs = NULL;
4686 txq->last_desc = 0;
4687 txq->next_desc_to_proc = 0;
4688 txq->descs_phys = 0;
4689
4690
4691 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4692
4693
4694 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4695 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4696 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4697}
4698
4699
4700static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4701{
4702 struct mvpp2_txq_pcpu *txq_pcpu;
4703 int delay, pending, cpu;
4704 u32 val;
4705
4706 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4707 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4708 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4709 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4710
4711 /* Drain mode is enabled above; wait until the pending descriptor
4712 * count drops to zero, or give up after the timeout.
4713 */
4714 delay = 0;
4715 do {
4716 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4717 netdev_warn(port->dev,
4718 "port %d: cleaning queue %d timed out\n",
4719 port->id, txq->log_id);
4720 break;
4721 }
4722 mdelay(1);
4723 delay++;
4724
4725 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4726 } while (pending);
4727
4728 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4729 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4730
4731 for_each_present_cpu(cpu) {
4732 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4733
4734
4735 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4736
4737
4738 txq_pcpu->count = 0;
4739 txq_pcpu->txq_put_index = 0;
4740 txq_pcpu->txq_get_index = 0;
4741 }
4742}
4743
4744
4745static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4746{
4747 struct mvpp2_tx_queue *txq;
4748 int queue;
4749 u32 val;
4750
4751 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4752
4753
4754 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4755 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4756
4757 for (queue = 0; queue < txq_number; queue++) {
4758 txq = port->txqs[queue];
4759 mvpp2_txq_clean(port, txq);
4760 mvpp2_txq_deinit(port, txq);
4761 }
4762
4763 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4764
4765 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4766 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4767}
4768
4769
4770static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4771{
4772 int queue;
4773
4774 for (queue = 0; queue < rxq_number; queue++)
4775 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4776}
4777
4778
4779static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4780{
4781 int queue, err;
4782
4783 for (queue = 0; queue < rxq_number; queue++) {
4784 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4785 if (err)
4786 goto err_cleanup;
4787 }
4788 return 0;
4789
4790err_cleanup:
4791 mvpp2_cleanup_rxqs(port);
4792 return err;
4793}
4794
4795
4796static int mvpp2_setup_txqs(struct mvpp2_port *port)
4797{
4798 struct mvpp2_tx_queue *txq;
4799 int queue, err;
4800
4801 for (queue = 0; queue < txq_number; queue++) {
4802 txq = port->txqs[queue];
4803 err = mvpp2_txq_init(port, txq);
4804 if (err)
4805 goto err_cleanup;
4806 }
4807
4808 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4809 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4810 return 0;
4811
4812err_cleanup:
4813 mvpp2_cleanup_txqs(port);
4814 return err;
4815}
4816
4817
4818static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4819{
4820 struct mvpp2_port *port = dev_id;
4821
4822 mvpp2_interrupts_disable(port);
4823
4824 napi_schedule(&port->napi);
4825
4826 return IRQ_HANDLED;
4827}
4828
4829
4830static void mvpp2_link_event(struct net_device *dev)
4831{
4832 struct mvpp2_port *port = netdev_priv(dev);
4833 struct phy_device *phydev = port->phy_dev;
4834 int status_change = 0;
4835 u32 val;
4836
4837 if (phydev->link) {
4838 if ((port->speed != phydev->speed) ||
4839 (port->duplex != phydev->duplex)) {
4840 u32 val;
4841
4842 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4843 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4844 MVPP2_GMAC_CONFIG_GMII_SPEED |
4845 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4846 MVPP2_GMAC_AN_SPEED_EN |
4847 MVPP2_GMAC_AN_DUPLEX_EN);
4848
4849 if (phydev->duplex)
4850 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4851
4852 if (phydev->speed == SPEED_1000)
4853 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4854 else if (phydev->speed == SPEED_100)
4855 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4856
4857 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4858
4859 port->duplex = phydev->duplex;
4860 port->speed = phydev->speed;
4861 }
4862 }
4863
4864 if (phydev->link != port->link) {
4865 if (!phydev->link) {
4866 port->duplex = -1;
4867 port->speed = 0;
4868 }
4869
4870 port->link = phydev->link;
4871 status_change = 1;
4872 }
4873
4874 if (status_change) {
4875 if (phydev->link) {
4876 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4877 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4878 MVPP2_GMAC_FORCE_LINK_DOWN);
4879 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4880 mvpp2_egress_enable(port);
4881 mvpp2_ingress_enable(port);
4882 } else {
4883 mvpp2_ingress_disable(port);
4884 mvpp2_egress_disable(port);
4885 }
4886 phy_print_status(phydev);
4887 }
4888}
4889
4890
4891
4892/* Log additional information about an Rx descriptor flagged with an error */
4893static void mvpp2_rx_error(struct mvpp2_port *port,
4894 struct mvpp2_rx_desc *rx_desc)
4895{
4896 u32 status = rx_desc->status;
4897
4898 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4899 case MVPP2_RXD_ERR_CRC:
4900 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4901 status, rx_desc->data_size);
4902 break;
4903 case MVPP2_RXD_ERR_OVERRUN:
4904 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4905 status, rx_desc->data_size);
4906 break;
4907 case MVPP2_RXD_ERR_RESOURCE:
4908 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4909 status, rx_desc->data_size);
4910 break;
4911 }
4912}
4913
4914
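/* Mark the skb checksum as already verified when the descriptor reports
 * a valid IPv4/IPv6 header and a good TCP/UDP checksum; otherwise leave
 * checksum validation to the stack.
 */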
4915static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4916 struct sk_buff *skb)
4917{
4918 if (((status & MVPP2_RXD_L3_IP4) &&
4919 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4920 (status & MVPP2_RXD_L3_IP6))
4921 if (((status & MVPP2_RXD_L4_UDP) ||
4922 (status & MVPP2_RXD_L4_TCP)) &&
4923 (status & MVPP2_RXD_L4_CSUM_OK)) {
4924 skb->csum = 0;
4925 skb->ip_summed = CHECKSUM_UNNECESSARY;
4926 return;
4927 }
4928
4929 skb->ip_summed = CHECKSUM_NONE;
4930}
4931
4932
4933static int mvpp2_rx_refill(struct mvpp2_port *port,
4934 struct mvpp2_bm_pool *bm_pool,
4935 u32 bm, int is_recycle)
4936{
4937 struct sk_buff *skb;
4938 dma_addr_t phys_addr;
4939
4940 if (is_recycle &&
4941 (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
4942 return 0;
4943
4944
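 /* Either this is not a recycle request or the pool is running low:
 * allocate a new skb and return its buffer to the Buffer Manager pool.
 */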
4945 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
4946 if (!skb)
4947 return -ENOMEM;
4948
4949 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
4950 atomic_dec(&bm_pool->in_use);
4951 return 0;
4952}
4953
4954
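/* Build the Tx descriptor checksum command for an skb that requests
 * hardware checksumming (CHECKSUM_PARTIAL); otherwise report that no
 * L3/L4 checksum offload is to be performed.
 */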
4955static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
4956{
4957 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4958 int ip_hdr_len = 0;
4959 u8 l4_proto;
4960
4961 if (skb->protocol == htons(ETH_P_IP)) {
4962 struct iphdr *ip4h = ip_hdr(skb);
4963
4964
4965 ip_hdr_len = ip4h->ihl;
4966 l4_proto = ip4h->protocol;
4967 } else if (skb->protocol == htons(ETH_P_IPV6)) {
4968 struct ipv6hdr *ip6h = ipv6_hdr(skb);
4969
4970
4971 if (skb_network_header_len(skb) > 0)
4972 ip_hdr_len = (skb_network_header_len(skb) >> 2);
4973 l4_proto = ip6h->nexthdr;
4974 } else {
4975 return MVPP2_TXD_L4_CSUM_NOT;
4976 }
4977
4978 return mvpp2_txq_desc_csum(skb_network_offset(skb),
4979 skb->protocol, ip_hdr_len, l4_proto);
4980 }
4981
4982 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
4983}
4984
4985static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
4986 struct mvpp2_rx_desc *rx_desc)
4987{
4988 struct mvpp2_buff_hdr *buff_hdr;
4989 struct sk_buff *skb;
4990 u32 rx_status = rx_desc->status;
4991 u32 buff_phys_addr;
4992 u32 buff_virt_addr;
4993 u32 buff_phys_addr_next;
4994 u32 buff_virt_addr_next;
4995 int mc_id;
4996 int pool_id;
4997
4998 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4999 MVPP2_RXD_BM_POOL_ID_OFFS;
5000 buff_phys_addr = rx_desc->buf_phys_addr;
5001 buff_virt_addr = rx_desc->buf_cookie;
5002
5003 do {
5004 skb = (struct sk_buff *)buff_virt_addr;
5005 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5006
5007 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5008
5009 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5010 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5011
5012
5013 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5014 buff_virt_addr, mc_id);
5015
5016 buff_phys_addr = buff_phys_addr_next;
5017 buff_virt_addr = buff_virt_addr_next;
5018
5019 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5020}
5021
5022
5023static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5024 struct mvpp2_rx_queue *rxq)
5025{
5026 struct net_device *dev = port->dev;
5027 int rx_received, rx_filled, i;
5028 u32 rcvd_pkts = 0;
5029 u32 rcvd_bytes = 0;
5030
5031
5032 rx_received = mvpp2_rxq_received(port, rxq->id);
5033 if (rx_todo > rx_received)
5034 rx_todo = rx_received;
5035
5036 rx_filled = 0;
5037 for (i = 0; i < rx_todo; i++) {
5038 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5039 struct mvpp2_bm_pool *bm_pool;
5040 struct sk_buff *skb;
5041 u32 bm, rx_status;
5042 int pool, rx_bytes, err;
5043
5044 rx_filled++;
5045 rx_status = rx_desc->status;
5046 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5047
5048 bm = mvpp2_bm_cookie_build(rx_desc);
5049 pool = mvpp2_bm_cookie_pool_get(bm);
5050 bm_pool = &port->priv->bm_pools[pool];
5051
5052 if (rx_status & MVPP2_RXD_BUF_HDR) {
5053 mvpp2_buff_hdr_rx(port, rx_desc);
5054 continue;
5055 }
5056 /* If the descriptor is flagged with an error, drop the packet:
5057 * count the error, log the details and return the buffer to the
5058 * Buffer Manager pool it came from. The pool and buffer
5059 * information is taken from the Rx descriptor itself.
5060 */
5061
5062 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5063 dev->stats.rx_errors++;
5064 mvpp2_rx_error(port, rx_desc);
5065 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5066 rx_desc->buf_cookie);
5067 continue;
5068 }
5069
5070 skb = (struct sk_buff *)rx_desc->buf_cookie;
5071
5072 rcvd_pkts++;
5073 rcvd_bytes += rx_bytes;
5074 atomic_inc(&bm_pool->in_use);
5075
5076 skb_reserve(skb, MVPP2_MH_SIZE);
5077 skb_put(skb, rx_bytes);
5078 skb->protocol = eth_type_trans(skb, dev);
5079 mvpp2_rx_csum(port, rx_status, skb);
5080
5081 napi_gro_receive(&port->napi, skb);
5082
5083 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5084 if (err) {
5085 netdev_err(port->dev, "failed to refill BM pools\n");
5086 rx_filled--;
5087 }
5088 }
5089
5090 if (rcvd_pkts) {
5091 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5092
5093 u64_stats_update_begin(&stats->syncp);
5094 stats->rx_packets += rcvd_pkts;
5095 stats->rx_bytes += rcvd_bytes;
5096 u64_stats_update_end(&stats->syncp);
5097 }
5098
5099
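 /* Ensure all descriptor and buffer writes are visible before telling
 * the hardware how many descriptors were processed and refilled.
 */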
5100 wmb();
5101 mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
5102
5103 return rx_todo;
5104}
5105
5106static inline void
5107tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5108 struct mvpp2_tx_desc *desc)
5109{
5110 dma_unmap_single(dev, desc->buf_phys_addr,
5111 desc->data_size, DMA_TO_DEVICE);
5112 mvpp2_txq_desc_put(txq);
5113}
5114
5115
5116static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5117 struct mvpp2_tx_queue *aggr_txq,
5118 struct mvpp2_tx_queue *txq)
5119{
5120 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5121 struct mvpp2_tx_desc *tx_desc;
5122 int i;
5123 dma_addr_t buf_phys_addr;
5124
5125 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5126 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5127 void *addr = page_address(frag->page.p) + frag->page_offset;
5128
5129 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5130 tx_desc->phys_txq = txq->id;
5131 tx_desc->data_size = frag->size;
5132
5133 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5134 tx_desc->data_size,
5135 DMA_TO_DEVICE);
5136 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5137 mvpp2_txq_desc_put(txq);
5138 goto error;
5139 }
5140
5141 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5142 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5143
5144 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5145
5146 tx_desc->command = MVPP2_TXD_L_DESC;
5147 mvpp2_txq_inc_put(txq_pcpu, skb);
5148 } else {
5149
5150 tx_desc->command = 0;
5151 mvpp2_txq_inc_put(txq_pcpu, NULL);
5152 }
5153 }
5154
5155 return 0;
5156
5157error:
5158
5159
5160
5161 for (i = i - 1; i >= 0; i--) {
5162 tx_desc = txq->descs + i;
5163 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5164 }
5165
5166 return -ENOMEM;
5167}
5168
5169
5170static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5171{
5172 struct mvpp2_port *port = netdev_priv(dev);
5173 struct mvpp2_tx_queue *txq, *aggr_txq;
5174 struct mvpp2_txq_pcpu *txq_pcpu;
5175 struct mvpp2_tx_desc *tx_desc;
5176 dma_addr_t buf_phys_addr;
5177 int frags = 0;
5178 u16 txq_id;
5179 u32 tx_cmd;
5180
5181 txq_id = skb_get_queue_mapping(skb);
5182 txq = port->txqs[txq_id];
5183 txq_pcpu = this_cpu_ptr(txq->pcpu);
5184 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5185
5186 frags = skb_shinfo(skb)->nr_frags + 1;
5187
5188
5189 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5190 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5191 txq_pcpu, frags)) {
5192 frags = 0;
5193 goto out;
5194 }
5195
5196
5197 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5198 tx_desc->phys_txq = txq->id;
5199 tx_desc->data_size = skb_headlen(skb);
5200
5201 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5202 tx_desc->data_size, DMA_TO_DEVICE);
5203 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5204 mvpp2_txq_desc_put(txq);
5205 frags = 0;
5206 goto out;
5207 }
5208 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5209 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5210
5211 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5212
5213 if (frags == 1) {
5214
5215 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5216 tx_desc->command = tx_cmd;
5217 mvpp2_txq_inc_put(txq_pcpu, skb);
5218 } else {
5219
5220 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5221 tx_desc->command = tx_cmd;
5222 mvpp2_txq_inc_put(txq_pcpu, NULL);
5223
5224
5225 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5226 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5227 frags = 0;
5228 goto out;
5229 }
5230 }
5231
5232 txq_pcpu->reserved_num -= frags;
5233 txq_pcpu->count += frags;
5234 aggr_txq->count += frags;
5235
5236
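 /* Make sure the descriptor contents are written back before the
 * pending-descriptor count is handed to the hardware.
 */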
5237 wmb();
5238 mvpp2_aggr_txq_pend_desc_add(port, frags);
5239
5240 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5241 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5242
5243 netif_tx_stop_queue(nq);
5244 }
5245out:
5246 if (frags > 0) {
5247 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5248
5249 u64_stats_update_begin(&stats->syncp);
5250 stats->tx_packets++;
5251 stats->tx_bytes += skb->len;
5252 u64_stats_update_end(&stats->syncp);
5253 } else {
5254 dev->stats.tx_dropped++;
5255 dev_kfree_skb_any(skb);
5256 }
5257
5258 return NETDEV_TX_OK;
5259}
5260
5261static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5262{
5263 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5264 netdev_err(dev, "FCS error\n");
5265 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5266 netdev_err(dev, "rx fifo overrun error\n");
5267 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5268 netdev_err(dev, "tx fifo underrun error\n");
5269}
5270
5271static void mvpp2_txq_done_percpu(void *arg)
5272{
5273 struct mvpp2_port *port = arg;
5274 u32 cause_rx_tx, cause_tx, cause_misc;
5275 /* Rx/Tx cause register layout:
5276 *
5277 * Bits 0-15: each bit indicates received packets on the
5278 * corresponding Rx queue (bit 0 is Rx queue 0).
5279 *
5280 * Bits 16-23: each bit indicates transmitted packets on the
5281 * corresponding Tx queue (bit 16 is Tx queue 0).
5282 *
5283 * This per-CPU handler only acts on the Tx and miscellaneous
5284 * parts of the register; the Rx bits are handled in mvpp2_poll().
5285 */
5286 cause_rx_tx = mvpp2_read(port->priv,
5287 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5288 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5289 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5290
5291 if (cause_misc) {
5292 mvpp2_cause_error(port->dev, cause_misc);
5293
5294
5295 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5296 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5297 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5298 }
5299
5300
5301 if (cause_tx) {
5302 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5303 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5304
5305 if (txq_pcpu->count)
5306 mvpp2_txq_done(port, txq, txq_pcpu);
5307 }
5308}
5309
5310static int mvpp2_poll(struct napi_struct *napi, int budget)
5311{
5312 u32 cause_rx_tx, cause_rx;
5313 int rx_done = 0;
5314 struct mvpp2_port *port = netdev_priv(napi->dev);
5315
5316 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5317
5318 cause_rx_tx = mvpp2_read(port->priv,
5319 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5320 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5321
5322
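 /* Fold in the Rx queues left unprocessed by the previous poll round,
 * saved in pending_cause_rx when the NAPI budget ran out.
 */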
5323 cause_rx |= port->pending_cause_rx;
5324 while (cause_rx && budget > 0) {
5325 int count;
5326 struct mvpp2_rx_queue *rxq;
5327
5328 rxq = mvpp2_get_rx_queue(port, cause_rx);
5329 if (!rxq)
5330 break;
5331
5332 count = mvpp2_rx(port, budget, rxq);
5333 rx_done += count;
5334 budget -= count;
5335 if (budget > 0) {
5336 /* Clear the bit associated with this Rx queue so the
5337 * next iteration moves on to the next queue with
5338 * pending packets.
5339 */
5340 cause_rx &= ~(1 << rxq->logic_rxq);
5341 }
5342 }
5343
5344 if (budget > 0) {
5345 cause_rx = 0;
5346 napi_complete(napi);
5347
5348 mvpp2_interrupts_enable(port);
5349 }
5350 port->pending_cause_rx = cause_rx;
5351 return rx_done;
5352}
5353
5354
5355static void mvpp2_start_dev(struct mvpp2_port *port)
5356{
5357 mvpp2_gmac_max_rx_size_set(port);
5358 mvpp2_txp_max_tx_size_set(port);
5359
5360 napi_enable(&port->napi);
5361
5362
5363 mvpp2_interrupts_enable(port);
5364
5365 mvpp2_port_enable(port);
5366 phy_start(port->phy_dev);
5367 netif_tx_start_all_queues(port->dev);
5368}
5369
5370
5371static void mvpp2_stop_dev(struct mvpp2_port *port)
5372{
5373
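 /* Stop new packets from arriving to the Rx queues, then give in-flight
 * packets some time to drain before masking interrupts and stopping
 * the egress path.
 */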
5374 mvpp2_ingress_disable(port);
5375
5376 mdelay(10);
5377
5378
5379 mvpp2_interrupts_disable(port);
5380
5381 napi_disable(&port->napi);
5382
5383 netif_carrier_off(port->dev);
5384 netif_tx_stop_all_queues(port->dev);
5385
5386 mvpp2_egress_disable(port);
5387 mvpp2_port_disable(port);
5388 phy_stop(port->phy_dev);
5389}
5390
5391
5392static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
5393{
5394 if (mtu < 68) {
5395 netdev_err(dev, "cannot change mtu to less than 68\n");
5396 return -EINVAL;
5397 }
5398
5399
5400 if (mtu > 9676) {
5401 netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
5402 mtu = 9676;
5403 }
5404
5405 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5406 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5407 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5408 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5409 }
5410
5411 return mtu;
5412}
5413
5414static int mvpp2_check_ringparam_valid(struct net_device *dev,
5415 struct ethtool_ringparam *ring)
5416{
5417 u16 new_rx_pending = ring->rx_pending;
5418 u16 new_tx_pending = ring->tx_pending;
5419
5420 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5421 return -EINVAL;
5422
5423 if (ring->rx_pending > MVPP2_MAX_RXD)
5424 new_rx_pending = MVPP2_MAX_RXD;
5425 else if (!IS_ALIGNED(ring->rx_pending, 16))
5426 new_rx_pending = ALIGN(ring->rx_pending, 16);
5427
5428 if (ring->tx_pending > MVPP2_MAX_TXD)
5429 new_tx_pending = MVPP2_MAX_TXD;
5430 else if (!IS_ALIGNED(ring->tx_pending, 32))
5431 new_tx_pending = ALIGN(ring->tx_pending, 32);
5432
5433 if (ring->rx_pending != new_rx_pending) {
5434 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5435 ring->rx_pending, new_rx_pending);
5436 ring->rx_pending = new_rx_pending;
5437 }
5438
5439 if (ring->tx_pending != new_tx_pending) {
5440 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5441 ring->tx_pending, new_tx_pending);
5442 ring->tx_pending = new_tx_pending;
5443 }
5444
5445 return 0;
5446}
5447
5448static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5449{
5450 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5451
5452 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5453 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5454 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5455 addr[0] = (mac_addr_h >> 24) & 0xFF;
5456 addr[1] = (mac_addr_h >> 16) & 0xFF;
5457 addr[2] = (mac_addr_h >> 8) & 0xFF;
5458 addr[3] = mac_addr_h & 0xFF;
5459 addr[4] = mac_addr_m & 0xFF;
5460 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5461}
5462
5463static int mvpp2_phy_connect(struct mvpp2_port *port)
5464{
5465 struct phy_device *phy_dev;
5466
5467 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5468 port->phy_interface);
5469 if (!phy_dev) {
5470 netdev_err(port->dev, "cannot connect to phy\n");
5471 return -ENODEV;
5472 }
5473 phy_dev->supported &= PHY_GBIT_FEATURES;
5474 phy_dev->advertising = phy_dev->supported;
5475
5476 port->phy_dev = phy_dev;
5477 port->link = 0;
5478 port->duplex = 0;
5479 port->speed = 0;
5480
5481 return 0;
5482}
5483
5484static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5485{
5486 phy_disconnect(port->phy_dev);
5487 port->phy_dev = NULL;
5488}
5489
5490static int mvpp2_open(struct net_device *dev)
5491{
5492 struct mvpp2_port *port = netdev_priv(dev);
5493 unsigned char mac_bcast[ETH_ALEN] = {
5494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5495 int err;
5496
5497 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5498 if (err) {
5499 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5500 return err;
5501 }
5502 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5503 dev->dev_addr, true);
5504 if (err) {
5505 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
5506 return err;
5507 }
5508 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5509 if (err) {
5510 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5511 return err;
5512 }
5513 err = mvpp2_prs_def_flow(port);
5514 if (err) {
5515 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5516 return err;
5517 }
5518
5519
5520 err = mvpp2_setup_rxqs(port);
5521 if (err) {
5522 netdev_err(port->dev, "cannot allocate Rx queues\n");
5523 return err;
5524 }
5525
5526 err = mvpp2_setup_txqs(port);
5527 if (err) {
5528 netdev_err(port->dev, "cannot allocate Tx queues\n");
5529 goto err_cleanup_rxqs;
5530 }
5531
5532 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5533 if (err) {
5534 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5535 goto err_cleanup_txqs;
5536 }
5537
5538
5539 netif_carrier_off(port->dev);
5540
5541 err = mvpp2_phy_connect(port);
5542 if (err < 0)
5543 goto err_free_irq;
5544
5545
5546 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5547
5548 mvpp2_start_dev(port);
5549
5550 return 0;
5551
5552err_free_irq:
5553 free_irq(port->irq, port);
5554err_cleanup_txqs:
5555 mvpp2_cleanup_txqs(port);
5556err_cleanup_rxqs:
5557 mvpp2_cleanup_rxqs(port);
5558 return err;
5559}
5560
5561static int mvpp2_stop(struct net_device *dev)
5562{
5563 struct mvpp2_port *port = netdev_priv(dev);
5564
5565 mvpp2_stop_dev(port);
5566 mvpp2_phy_disconnect(port);
5567
5568
5569 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5570
5571 free_irq(port->irq, port);
5572 mvpp2_cleanup_rxqs(port);
5573 mvpp2_cleanup_txqs(port);
5574
5575 return 0;
5576}
5577
5578static void mvpp2_set_rx_mode(struct net_device *dev)
5579{
5580 struct mvpp2_port *port = netdev_priv(dev);
5581 struct mvpp2 *priv = port->priv;
5582 struct netdev_hw_addr *ha;
5583 int id = port->id;
5584 bool allmulti = dev->flags & IFF_ALLMULTI;
5585
5586 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5587 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5588 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5589
5590
5591 mvpp2_prs_mcast_del_all(priv, id);
5592
5593 if (allmulti && !netdev_mc_empty(dev)) {
5594 netdev_for_each_mc_addr(ha, dev)
5595 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5596 }
5597}
5598
5599static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5600{
5601 struct mvpp2_port *port = netdev_priv(dev);
5602 const struct sockaddr *addr = p;
5603 int err;
5604
5605 if (!is_valid_ether_addr(addr->sa_data)) {
5606 err = -EADDRNOTAVAIL;
5607 goto error;
5608 }
5609
5610 if (!netif_running(dev)) {
5611 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5612 if (!err)
5613 return 0;
5614
5615 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5616 if (err)
5617 goto error;
5618 }
5619
5620 mvpp2_stop_dev(port);
5621
5622 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5623 if (!err)
5624 goto out_start;
5625
5626
5627 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5628 if (err)
5629 goto error;
5630out_start:
5631 mvpp2_start_dev(port);
5632 mvpp2_egress_enable(port);
5633 mvpp2_ingress_enable(port);
5634 return 0;
5635
5636error:
5637 netdev_err(dev, "failed to change MAC address\n");
5638 return err;
5639}
5640
5641static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5642{
5643 struct mvpp2_port *port = netdev_priv(dev);
5644 int err;
5645
5646 mtu = mvpp2_check_mtu_valid(dev, mtu);
5647 if (mtu < 0) {
5648 err = mtu;
5649 goto error;
5650 }
5651
5652 if (!netif_running(dev)) {
5653 err = mvpp2_bm_update_mtu(dev, mtu);
5654 if (!err) {
5655 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5656 return 0;
5657 }
5658
5659
5660 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5661 if (err)
5662 goto error;
5663 }
5664
5665 mvpp2_stop_dev(port);
5666
5667 err = mvpp2_bm_update_mtu(dev, mtu);
5668 if (!err) {
5669 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5670 goto out_start;
5671 }
5672
5673
5674 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5675 if (err)
5676 goto error;
5677
5678out_start:
5679 mvpp2_start_dev(port);
5680 mvpp2_egress_enable(port);
5681 mvpp2_ingress_enable(port);
5682
5683 return 0;
5684
5685error:
5686 netdev_err(dev, "failed to change MTU\n");
5687 return err;
5688}
5689
5690static struct rtnl_link_stats64 *
5691mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5692{
5693 struct mvpp2_port *port = netdev_priv(dev);
5694 unsigned int start;
5695 int cpu;
5696
5697 for_each_possible_cpu(cpu) {
5698 struct mvpp2_pcpu_stats *cpu_stats;
5699 u64 rx_packets;
5700 u64 rx_bytes;
5701 u64 tx_packets;
5702 u64 tx_bytes;
5703
5704 cpu_stats = per_cpu_ptr(port->stats, cpu);
5705 do {
5706 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5707 rx_packets = cpu_stats->rx_packets;
5708 rx_bytes = cpu_stats->rx_bytes;
5709 tx_packets = cpu_stats->tx_packets;
5710 tx_bytes = cpu_stats->tx_bytes;
5711 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5712
5713 stats->rx_packets += rx_packets;
5714 stats->rx_bytes += rx_bytes;
5715 stats->tx_packets += tx_packets;
5716 stats->tx_bytes += tx_bytes;
5717 }
5718
5719 stats->rx_errors = dev->stats.rx_errors;
5720 stats->rx_dropped = dev->stats.rx_dropped;
5721 stats->tx_dropped = dev->stats.tx_dropped;
5722
5723 return stats;
5724}
5725
5726static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5727{
5728 struct mvpp2_port *port = netdev_priv(dev);
5729 int ret;
5730
5731 if (!port->phy_dev)
5732 return -ENOTSUPP;
5733
5734 ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
5735 if (!ret)
5736 mvpp2_link_event(dev);
5737
5738 return ret;
5739}
5740
5741
5742
5743
5744static int mvpp2_ethtool_get_settings(struct net_device *dev,
5745 struct ethtool_cmd *cmd)
5746{
5747 struct mvpp2_port *port = netdev_priv(dev);
5748
5749 if (!port->phy_dev)
5750 return -ENODEV;
5751 return phy_ethtool_gset(port->phy_dev, cmd);
5752}
5753
5754
5755static int mvpp2_ethtool_set_settings(struct net_device *dev,
5756 struct ethtool_cmd *cmd)
5757{
5758 struct mvpp2_port *port = netdev_priv(dev);
5759
5760 if (!port->phy_dev)
5761 return -ENODEV;
5762 return phy_ethtool_sset(port->phy_dev, cmd);
5763}
5764
5765
5766static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5767 struct ethtool_coalesce *c)
5768{
5769 struct mvpp2_port *port = netdev_priv(dev);
5770 int queue;
5771
5772 for (queue = 0; queue < rxq_number; queue++) {
5773 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5774
5775 rxq->time_coal = c->rx_coalesce_usecs;
5776 rxq->pkts_coal = c->rx_max_coalesced_frames;
5777 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
5778 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
5779 }
5780
5781 for (queue = 0; queue < txq_number; queue++) {
5782 struct mvpp2_tx_queue *txq = port->txqs[queue];
5783
5784 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5785 }
5786
5787 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5788 return 0;
5789}
5790
5791
5792static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5793 struct ethtool_coalesce *c)
5794{
5795 struct mvpp2_port *port = netdev_priv(dev);
5796
5797 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5798 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5799 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5800 return 0;
5801}
5802
5803static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5804 struct ethtool_drvinfo *drvinfo)
5805{
5806 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5807 sizeof(drvinfo->driver));
5808 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5809 sizeof(drvinfo->version));
5810 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5811 sizeof(drvinfo->bus_info));
5812}
5813
5814static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5815 struct ethtool_ringparam *ring)
5816{
5817 struct mvpp2_port *port = netdev_priv(dev);
5818
5819 ring->rx_max_pending = MVPP2_MAX_RXD;
5820 ring->tx_max_pending = MVPP2_MAX_TXD;
5821 ring->rx_pending = port->rx_ring_size;
5822 ring->tx_pending = port->tx_ring_size;
5823}
5824
5825static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5826 struct ethtool_ringparam *ring)
5827{
5828 struct mvpp2_port *port = netdev_priv(dev);
5829 u16 prev_rx_ring_size = port->rx_ring_size;
5830 u16 prev_tx_ring_size = port->tx_ring_size;
5831 int err;
5832
5833 err = mvpp2_check_ringparam_valid(dev, ring);
5834 if (err)
5835 return err;
5836
5837 if (!netif_running(dev)) {
5838 port->rx_ring_size = ring->rx_pending;
5839 port->tx_ring_size = ring->tx_pending;
5840 return 0;
5841 }
5842
5843
5844
5845
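 /* The interface is running: stop it and re-create the queues with the
 * new ring sizes, falling back to the previous sizes if the
 * reallocation fails.
 */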
5846 mvpp2_stop_dev(port);
5847 mvpp2_cleanup_rxqs(port);
5848 mvpp2_cleanup_txqs(port);
5849
5850 port->rx_ring_size = ring->rx_pending;
5851 port->tx_ring_size = ring->tx_pending;
5852
5853 err = mvpp2_setup_rxqs(port);
5854 if (err) {
5855
5856 port->rx_ring_size = prev_rx_ring_size;
5857 ring->rx_pending = prev_rx_ring_size;
5858 err = mvpp2_setup_rxqs(port);
5859 if (err)
5860 goto err_out;
5861 }
5862 err = mvpp2_setup_txqs(port);
5863 if (err) {
5864
5865 port->tx_ring_size = prev_tx_ring_size;
5866 ring->tx_pending = prev_tx_ring_size;
5867 err = mvpp2_setup_txqs(port);
5868 if (err)
5869 goto err_clean_rxqs;
5870 }
5871
5872 mvpp2_start_dev(port);
5873 mvpp2_egress_enable(port);
5874 mvpp2_ingress_enable(port);
5875
5876 return 0;
5877
5878err_clean_rxqs:
5879 mvpp2_cleanup_rxqs(port);
5880err_out:
5881 netdev_err(dev, "failed to change ring parameters\n");
5882 return err;
5883}
5884
5885
5886
5887static const struct net_device_ops mvpp2_netdev_ops = {
5888 .ndo_open = mvpp2_open,
5889 .ndo_stop = mvpp2_stop,
5890 .ndo_start_xmit = mvpp2_tx,
5891 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5892 .ndo_set_mac_address = mvpp2_set_mac_address,
5893 .ndo_change_mtu = mvpp2_change_mtu,
5894 .ndo_get_stats64 = mvpp2_get_stats64,
5895 .ndo_do_ioctl = mvpp2_ioctl,
5896};
5897
5898static const struct ethtool_ops mvpp2_eth_tool_ops = {
5899 .get_link = ethtool_op_get_link,
5900 .get_settings = mvpp2_ethtool_get_settings,
5901 .set_settings = mvpp2_ethtool_set_settings,
5902 .set_coalesce = mvpp2_ethtool_set_coalesce,
5903 .get_coalesce = mvpp2_ethtool_get_coalesce,
5904 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5905 .get_ringparam = mvpp2_ethtool_get_ringparam,
5906 .set_ringparam = mvpp2_ethtool_set_ringparam,
5907};
5908
5909
5910
5911static void mvpp2_port_power_up(struct mvpp2_port *port)
5912{
5913 mvpp2_port_mii_set(port);
5914 mvpp2_port_periodic_xon_disable(port);
5915 mvpp2_port_fc_adv_enable(port);
5916 mvpp2_port_reset(port);
5917}
5918
5919
5920static int mvpp2_port_init(struct mvpp2_port *port)
5921{
5922 struct device *dev = port->dev->dev.parent;
5923 struct mvpp2 *priv = port->priv;
5924 struct mvpp2_txq_pcpu *txq_pcpu;
5925 int queue, cpu, err;
5926
5927 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
5928 return -EINVAL;
5929
5930
5931 mvpp2_egress_disable(port);
5932 mvpp2_port_disable(port);
5933
5934 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
5935 GFP_KERNEL);
5936 if (!port->txqs)
5937 return -ENOMEM;
5938
5939
5940
5941
5942 for (queue = 0; queue < txq_number; queue++) {
5943 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5944 struct mvpp2_tx_queue *txq;
5945
5946 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5947 if (!txq)
5948 return -ENOMEM;
5949
5950 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5951 if (!txq->pcpu) {
5952 err = -ENOMEM;
5953 goto err_free_percpu;
5954 }
5955
5956 txq->id = queue_phy_id;
5957 txq->log_id = queue;
5958 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5959 for_each_present_cpu(cpu) {
5960 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5961 txq_pcpu->cpu = cpu;
5962 }
5963
5964 port->txqs[queue] = txq;
5965 }
5966
5967 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
5968 GFP_KERNEL);
5969 if (!port->rxqs) {
5970 err = -ENOMEM;
5971 goto err_free_percpu;
5972 }
5973
5974
5975 for (queue = 0; queue < rxq_number; queue++) {
5976 struct mvpp2_rx_queue *rxq;
5977
5978
5979 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5980 if (!rxq)
5981 goto err_free_percpu;
5982
5983 rxq->id = port->first_rxq + queue;
5984 rxq->port = port->id;
5985 rxq->logic_rxq = queue;
5986
5987 port->rxqs[queue] = rxq;
5988 }
5989
5990
5991 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
5992
5993
5994 for (queue = 0; queue < rxq_number; queue++) {
5995 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5996
5997 rxq->size = port->rx_ring_size;
5998 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5999 rxq->time_coal = MVPP2_RX_COAL_USEC;
6000 }
6001
6002 mvpp2_ingress_disable(port);
6003
6004
6005 mvpp2_defaults_set(port);
6006
6007
6008 mvpp2_cls_oversize_rxq_set(port);
6009 mvpp2_cls_port_config(port);
6010
6011
6012 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6013
6014
6015 err = mvpp2_swf_bm_pool_init(port);
6016 if (err)
6017 goto err_free_percpu;
6018
6019 return 0;
6020
6021err_free_percpu:
6022 for (queue = 0; queue < txq_number; queue++) {
6023 if (!port->txqs[queue])
6024 continue;
6025 free_percpu(port->txqs[queue]->pcpu);
6026 }
6027 return err;
6028}
6029
6030
6031static int mvpp2_port_probe(struct platform_device *pdev,
6032 struct device_node *port_node,
6033 struct mvpp2 *priv,
6034 int *next_first_rxq)
6035{
6036 struct device_node *phy_node;
6037 struct mvpp2_port *port;
6038 struct net_device *dev;
6039 struct resource *res;
6040 const char *dt_mac_addr;
6041 const char *mac_from;
6042 char hw_mac_addr[ETH_ALEN];
6043 u32 id;
6044 int features;
6045 int phy_mode;
6046 int priv_common_regs_num = 2;
6047 int err, i;
6048
6049 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6050 rxq_number);
6051 if (!dev)
6052 return -ENOMEM;
6053
6054 phy_node = of_parse_phandle(port_node, "phy", 0);
6055 if (!phy_node) {
6056 dev_err(&pdev->dev, "missing phy\n");
6057 err = -ENODEV;
6058 goto err_free_netdev;
6059 }
6060
6061 phy_mode = of_get_phy_mode(port_node);
6062 if (phy_mode < 0) {
6063 dev_err(&pdev->dev, "incorrect phy mode\n");
6064 err = phy_mode;
6065 goto err_free_netdev;
6066 }
6067
6068 if (of_property_read_u32(port_node, "port-id", &id)) {
6069 err = -EINVAL;
6070 dev_err(&pdev->dev, "missing port-id value\n");
6071 goto err_free_netdev;
6072 }
6073
6074 dev->tx_queue_len = MVPP2_MAX_TXD;
6075 dev->watchdog_timeo = 5 * HZ;
6076 dev->netdev_ops = &mvpp2_netdev_ops;
6077 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6078
6079 port = netdev_priv(dev);
6080
6081 port->irq = irq_of_parse_and_map(port_node, 0);
6082 if (port->irq <= 0) {
6083 err = -EINVAL;
6084 goto err_free_netdev;
6085 }
6086
6087 if (of_property_read_bool(port_node, "marvell,loopback"))
6088 port->flags |= MVPP2_F_LOOPBACK;
6089
6090 port->priv = priv;
6091 port->id = id;
6092 port->first_rxq = *next_first_rxq;
6093 port->phy_node = phy_node;
6094 port->phy_interface = phy_mode;
6095
6096 res = platform_get_resource(pdev, IORESOURCE_MEM,
6097 priv_common_regs_num + id);
6098 port->base = devm_ioremap_resource(&pdev->dev, res);
6099 if (IS_ERR(port->base)) {
6100 err = PTR_ERR(port->base);
6101 goto err_free_irq;
6102 }
6103
6104
6105 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6106 if (!port->stats) {
6107 err = -ENOMEM;
6108 goto err_free_irq;
6109 }
6110
6111 dt_mac_addr = of_get_mac_address(port_node);
6112 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6113 mac_from = "device tree";
6114 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6115 } else {
6116 mvpp2_get_mac_address(port, hw_mac_addr);
6117 if (is_valid_ether_addr(hw_mac_addr)) {
6118 mac_from = "hardware";
6119 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6120 } else {
6121 mac_from = "random";
6122 eth_hw_addr_random(dev);
6123 }
6124 }
6125
6126 port->tx_ring_size = MVPP2_MAX_TXD;
6127 port->rx_ring_size = MVPP2_MAX_RXD;
6128 port->dev = dev;
6129 SET_NETDEV_DEV(dev, &pdev->dev);
6130
6131 err = mvpp2_port_init(port);
6132 if (err < 0) {
6133 dev_err(&pdev->dev, "failed to init port %d\n", id);
6134 goto err_free_stats;
6135 }
6136 mvpp2_port_power_up(port);
6137
6138 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6139 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6140 dev->features = features | NETIF_F_RXCSUM;
6141 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6142 dev->vlan_features |= features;
6143
6144 err = register_netdev(dev);
6145 if (err < 0) {
6146 dev_err(&pdev->dev, "failed to register netdev\n");
6147 goto err_free_txq_pcpu;
6148 }
6149 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6150
6151
6152 *next_first_rxq += rxq_number;
6153 priv->port_list[id] = port;
6154 return 0;
6155
6156err_free_txq_pcpu:
6157 for (i = 0; i < txq_number; i++)
6158 free_percpu(port->txqs[i]->pcpu);
6159err_free_stats:
6160 free_percpu(port->stats);
6161err_free_irq:
6162 irq_dispose_mapping(port->irq);
6163err_free_netdev:
6164 free_netdev(dev);
6165 return err;
6166}
6167
6168
6169static void mvpp2_port_remove(struct mvpp2_port *port)
6170{
6171 int i;
6172
6173 unregister_netdev(port->dev);
6174 free_percpu(port->stats);
6175 for (i = 0; i < txq_number; i++)
6176 free_percpu(port->txqs[i]->pcpu);
6177 irq_dispose_mapping(port->irq);
6178 free_netdev(port->dev);
6179}
6180
6181
6182static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6183 struct mvpp2 *priv)
6184{
6185 u32 win_enable;
6186 int i;
6187
6188 for (i = 0; i < 6; i++) {
6189 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6190 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6191
6192 if (i < 4)
6193 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6194 }
6195
6196 win_enable = 0;
6197
6198 for (i = 0; i < dram->num_cs; i++) {
6199 const struct mbus_dram_window *cs = dram->cs + i;
6200
6201 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6202 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6203 dram->mbus_dram_target_id);
6204
6205 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6206 (cs->size - 1) & 0xffff0000);
6207
6208 win_enable |= (1 << i);
6209 }
6210
6211 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6212}
6213
6214
6215static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6216{
6217 int port;
6218
6219 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6220 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6221 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6222 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6223 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6224 }
6225
6226 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6227 MVPP2_RX_FIFO_PORT_MIN_PKT);
6228 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6229}
6230
6231
6232static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6233{
6234 const struct mbus_dram_target_info *dram_target_info;
6235 int err, i;
6236 u32 val;
6237
6238
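 /* Sanity-check the Rx/Tx queue-count settings against hardware
 * limits before touching the controller.
 */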
6239 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6240 (txq_number > MVPP2_MAX_TXQ)) {
6241 dev_err(&pdev->dev, "invalid queue size parameter\n");
6242 return -EINVAL;
6243 }
6244
6245
6246 dram_target_info = mv_mbus_dram_info();
6247 if (dram_target_info)
6248 mvpp2_conf_mbus_windows(dram_target_info, priv);
6249
6250
6251 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6252 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6253 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6254
6255
6256 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6257 sizeof(struct mvpp2_tx_queue),
6258 GFP_KERNEL);
6259 if (!priv->aggr_txqs)
6260 return -ENOMEM;
6261
6262 for_each_present_cpu(i) {
6263 priv->aggr_txqs[i].id = i;
6264 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6265 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6266 MVPP2_AGGR_TXQ_SIZE, i, priv);
6267 if (err < 0)
6268 return err;
6269 }
6270
6271
6272 mvpp2_rx_fifo_init(priv);
6273
6274
6275 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6276 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6277
6278 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6279 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6280
6281
6282 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6283
6284
6285 err = mvpp2_bm_init(pdev, priv);
6286 if (err < 0)
6287 return err;
6288
6289
6290 err = mvpp2_prs_default_init(pdev, priv);
6291 if (err < 0)
6292 return err;
6293
6294
6295 mvpp2_cls_init(priv);
6296
6297 return 0;
6298}
6299
6300static int mvpp2_probe(struct platform_device *pdev)
6301{
6302 struct device_node *dn = pdev->dev.of_node;
6303 struct device_node *port_node;
6304 struct mvpp2 *priv;
6305 struct resource *res;
6306 int port_count, first_rxq;
6307 int err;
6308
6309 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6310 if (!priv)
6311 return -ENOMEM;
6312
6313 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6314 priv->base = devm_ioremap_resource(&pdev->dev, res);
6315 if (IS_ERR(priv->base))
6316 return PTR_ERR(priv->base);
6317
6318 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6319 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6320 if (IS_ERR(priv->lms_base))
6321 return PTR_ERR(priv->lms_base);
6322
6323 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6324 if (IS_ERR(priv->pp_clk))
6325 return PTR_ERR(priv->pp_clk);
6326 err = clk_prepare_enable(priv->pp_clk);
6327 if (err < 0)
6328 return err;
6329
6330 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6331 if (IS_ERR(priv->gop_clk)) {
6332 err = PTR_ERR(priv->gop_clk);
6333 goto err_pp_clk;
6334 }
6335 err = clk_prepare_enable(priv->gop_clk);
6336 if (err < 0)
6337 goto err_pp_clk;
6338
6339
6340 priv->tclk = clk_get_rate(priv->pp_clk);
6341
6342
6343 err = mvpp2_init(pdev, priv);
6344 if (err < 0) {
6345 dev_err(&pdev->dev, "failed to initialize controller\n");
6346 goto err_gop_clk;
6347 }
6348
6349 port_count = of_get_available_child_count(dn);
6350 if (port_count == 0) {
6351 dev_err(&pdev->dev, "no ports enabled\n");
6352 err = -ENODEV;
6353 goto err_gop_clk;
6354 }
6355
6356 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6357 sizeof(struct mvpp2_port *),
6358 GFP_KERNEL);
6359 if (!priv->port_list) {
6360 err = -ENOMEM;
6361 goto err_gop_clk;
6362 }
6363
6364
6365 first_rxq = 0;
6366 for_each_available_child_of_node(dn, port_node) {
6367 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6368 if (err < 0)
6369 goto err_gop_clk;
6370 }
6371
6372 platform_set_drvdata(pdev, priv);
6373 return 0;
6374
6375err_gop_clk:
6376 clk_disable_unprepare(priv->gop_clk);
6377err_pp_clk:
6378 clk_disable_unprepare(priv->pp_clk);
6379 return err;
6380}
6381
6382static int mvpp2_remove(struct platform_device *pdev)
6383{
6384 struct mvpp2 *priv = platform_get_drvdata(pdev);
6385 struct device_node *dn = pdev->dev.of_node;
6386 struct device_node *port_node;
6387 int i = 0;
6388
6389 for_each_available_child_of_node(dn, port_node) {
6390 if (priv->port_list[i])
6391 mvpp2_port_remove(priv->port_list[i]);
6392 i++;
6393 }
6394
6395 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6396 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6397
6398 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6399 }
6400
6401 for_each_present_cpu(i) {
6402 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6403
6404 dma_free_coherent(&pdev->dev,
6405 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6406 aggr_txq->descs,
6407 aggr_txq->descs_phys);
6408 }
6409
6410 clk_disable_unprepare(priv->pp_clk);
6411 clk_disable_unprepare(priv->gop_clk);
6412
6413 return 0;
6414}
6415
6416static const struct of_device_id mvpp2_match[] = {
6417 { .compatible = "marvell,armada-375-pp2" },
6418 { }
6419};
6420MODULE_DEVICE_TABLE(of, mvpp2_match);
6421
6422static struct platform_driver mvpp2_driver = {
6423 .probe = mvpp2_probe,
6424 .remove = mvpp2_remove,
6425 .driver = {
6426 .name = MVPP2_DRIVER_NAME,
6427 .of_match_table = mvpp2_match,
6428 },
6429};
6430
6431module_platform_driver(mvpp2_driver);
6432
6433MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6434MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6435MODULE_LICENSE("GPL v2");
6436