/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

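/* hwbm 'construct' callback, invoked by the generic hwbm layer (e.g. from
 * hwbm_pool_add()) for each fresh buffer handed to the hardware pool:
 * DMA-map the buffer and push its physical address to the buffer
 * pointers' pool (BPPI).
 */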
int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;
	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type != MVNETA_BM_FREE && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		mutex_init(&hwbm_pool->buf_lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);

/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work-around to the problems when destroying the pool,
		 * when it occurs that a read access to BPPI returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Destroy pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);
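
/* Start the BM unit and set up all pools: reset each pool's read/write
 * pointers and take its capacity and default packet size from the optional
 * "poolX,capacity" and "poolX,pkt-size" DT properties, clamping illegal
 * capacities to the supported range.
 */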
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size,
				 ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}

static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask BM all interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}
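
/* The buffer pointers' pool (BPPI) is kept in the SoC's internal SRAM,
 * obtained here as a chunk of the "internal-mem" genalloc pool.
 */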
373
374static int mvneta_bm_get_sram(struct device_node *dn,
375 struct mvneta_bm *priv)
376{
377 priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
378 if (!priv->bppi_pool)
379 return -ENOMEM;
380
381 priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
382 MVNETA_BM_BPPI_SIZE,
383 &priv->bppi_phys_addr);
384 if (!priv->bppi_virt_addr)
385 return -ENOMEM;
386
387 return 0;
388}
389
390static void mvneta_bm_put_sram(struct mvneta_bm *priv)
391{
392 gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
393 MVNETA_BM_BPPI_SIZE);
394}
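
/* Look up BM private data from the BM device tree node (as referenced by a
 * port's phandle). of_find_device_by_node() takes a reference on the
 * platform device; the caller drops it with mvneta_bm_put().
 */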
struct mvneta_bm *mvneta_bm_get(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);

	return pdev ? platform_get_drvdata(pdev) : NULL;
}
EXPORT_SYMBOL_GPL(mvneta_bm_get);

void mvneta_bm_put(struct mvneta_bm *priv)
{
	platform_device_put(priv->pdev);
}
EXPORT_SYMBOL_GPL(mvneta_bm_put);
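
/* Probe: map the BM registers, enable the clock, carve the BPPI out of
 * internal SRAM and initialize the controller and its pools.
 */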
static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	struct resource *res;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i = 0;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");