linux/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

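/* Parse the TDES0 status word of a reclaimed TX descriptor. The final
 * status is only reported on the last segment of a frame; on the way, the
 * error bits (jabber, carrier loss, collisions, underflow, COE failures)
 * are folded into the extra stats and the netdev counters.
 */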
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p, void __iomem *ioaddr)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int tdes0 = le32_to_cpu(p->des0);
        int ret = tx_done;

        /* Get tx owner first */
        if (unlikely(tdes0 & ETDES0_OWN))
                return tx_dma_own;

        /* Tx errors are only valid on the last segment of a frame. */
        if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
                return tx_not_ls;

        if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
                if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
                        x->tx_jabber++;

                if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
                        x->tx_frame_flushed++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }

                if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
                             (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
                        stats->collisions +=
                                (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

                if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
                        x->tx_deferred++;

                if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
                        dwmac_dma_flush_tx_fifo(ioaddr);
                        x->tx_underflow++;
                }

                if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
                        x->tx_ip_header_error++;

                if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
                        x->tx_payload_error++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }

                ret = tx_err;
        }

        if (unlikely(tdes0 & ETDES0_DEFERRED))
                x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
        if (tdes0 & ETDES0_VLAN_FRAME)
                x->tx_vlan++;
#endif

        return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
        return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

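/* Combine the three RDES0 checksum-offload bits (frame type, IPC header
 * error, payload error) into a single 3-bit code and translate it into
 * the frame status understood by the core, per the table below.
 */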
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
        int ret = good_frame;
        u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

        /* bits 5 7 0 | Frame status
         * ----------------------------------------------------------
         *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
         *      1 0 0 | IPv4/6 No CSUM error.
         *      1 0 1 | IPv4/6 CSUM PAYLOAD error
         *      1 1 0 | IPv4/6 CSUM IP HEADER error
         *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
         *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
         *      0 1 1 | COE bypassed: not an IPv4/6 frame
         *      0 1 0 | Reserved.
         */
        if (status == 0x0)
                ret = llc_snap;
        else if (status == 0x4)
                ret = good_frame;
        else if (status == 0x5)
                ret = csum_none;
        else if (status == 0x6)
                ret = csum_none;
        else if (status == 0x7)
                ret = csum_none;
        else if (status == 0x1)
                ret = discard_frame;
        else if (status == 0x3)
                ret = discard_frame;
        return ret;
}

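/* Decode the extended status word (RDES4) of an extended RX descriptor:
 * IP header/payload checksum results, IPv4/IPv6 indication, PTP message
 * type and L3/L4 filter matches are accounted into the extra statistics.
 * Per the descriptor layout this file assumes, RDES0 bit 0
 * (ERDES0_RX_MAC_ADDR) doubles as the "extended status available" flag.
 */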
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_extended_desc *p)
{
        unsigned int rdes0 = le32_to_cpu(p->basic.des0);
        unsigned int rdes4 = le32_to_cpu(p->des4);

        if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
                int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

                if (rdes4 & ERDES4_IP_HDR_ERR)
                        x->ip_hdr_err++;
                if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
                        x->ip_payload_err++;
                if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
                        x->ip_csum_bypassed++;
                if (rdes4 & ERDES4_IPV4_PKT_RCVD)
                        x->ipv4_pkt_rcvd++;
                if (rdes4 & ERDES4_IPV6_PKT_RCVD)
                        x->ipv6_pkt_rcvd++;

                if (message_type == RDES_EXT_NO_PTP)
                        x->no_ptp_rx_msg_type_ext++;
                else if (message_type == RDES_EXT_SYNC)
                        x->ptp_rx_msg_type_sync++;
                else if (message_type == RDES_EXT_FOLLOW_UP)
                        x->ptp_rx_msg_type_follow_up++;
                else if (message_type == RDES_EXT_DELAY_REQ)
                        x->ptp_rx_msg_type_delay_req++;
                else if (message_type == RDES_EXT_DELAY_RESP)
                        x->ptp_rx_msg_type_delay_resp++;
                else if (message_type == RDES_EXT_PDELAY_REQ)
                        x->ptp_rx_msg_type_pdelay_req++;
                else if (message_type == RDES_EXT_PDELAY_RESP)
                        x->ptp_rx_msg_type_pdelay_resp++;
                else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
                        x->ptp_rx_msg_type_pdelay_follow_up++;
                else if (message_type == RDES_PTP_ANNOUNCE)
                        x->ptp_rx_msg_type_announce++;
                else if (message_type == RDES_PTP_MANAGEMENT)
                        x->ptp_rx_msg_type_management++;
                else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
                        x->ptp_rx_msg_pkt_reserved_type++;

                if (rdes4 & ERDES4_PTP_FRAME_TYPE)
                        x->ptp_frame_type++;
                if (rdes4 & ERDES4_PTP_VER)
                        x->ptp_ver++;
                if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
                        x->timestamp_dropped++;
                if (rdes4 & ERDES4_AV_PKT_RCVD)
                        x->av_pkt_rcvd++;
                if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
                        x->av_tagged_pkt_rcvd++;
                if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
                        x->vlan_tag_priority_val++;
                if (rdes4 & ERDES4_L3_FILTER_MATCH)
                        x->l3_filter_match++;
                if (rdes4 & ERDES4_L4_FILTER_MATCH)
                        x->l4_filter_match++;
                if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
                        x->l3_l4_filter_no_match++;
        }
}

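/* Parse the RDES0 status word of a received descriptor: check ownership,
 * account the error-summary bits, then let the checksum-offload result
 * decide whether the frame is good, needs a software checksum, or must
 * be dropped.
 */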
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int rdes0 = le32_to_cpu(p->des0);
        int ret = good_frame;

        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;

        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
                        stats->rx_length_errors++;
                }
                if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
                        x->rx_gmac_overflow++;

                if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
                        pr_err("\tIPC Csum Error/Giant frame\n");

                if (unlikely(rdes0 & RDES0_COLLISION))
                        stats->collisions++;
                if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
                        x->rx_watchdog++;

                if (unlikely(rdes0 & RDES0_MII_ERROR))  /* GMII */
                        x->rx_mii++;

                if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
                        x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }

        /* After a payload csum error, the ES bit is set.
         * This does not match the information reported in the databook.
         * In any case we need to understand whether the HW csum computation
         * succeeded and report this info to the upper layers, but without
         * letting it upgrade a frame already marked for discard.
         */
        if (likely(ret == good_frame))
                ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
                                         !!(rdes0 & RDES0_FRAME_TYPE),
                                         !!(rdes0 & ERDES0_RX_MAC_ADDR));

        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;

        if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
                x->rx_length++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (rdes0 & RDES0_VLAN_TAG)
                x->rx_vlan++;
#endif

        return ret;
}

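/* Prepare an RX descriptor for the DMA: hand it to the hardware (OWN),
 * program the maximum buffer size and hook it into the ring or chain.
 */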
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                  int mode, int end)
{
        p->des0 |= cpu_to_le32(RDES0_OWN);
        p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);

        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
                ehn_desc_rx_set_on_ring(p, end);

        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
        p->des0 &= cpu_to_le32(~ETDES0_OWN);
        if (mode == STMMAC_CHAIN_MODE)
                enh_desc_end_tx_desc_on_chain(p);
        else
                enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
        p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

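/* Reset a TX descriptor after transmission, preserving the end-of-ring
 * (TER) bit so the ring keeps wrapping at the same descriptor.
 */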
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
        int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

        memset(p, 0, offsetof(struct dma_desc, des2));
        if (mode == STMMAC_CHAIN_MODE)
                enh_desc_end_tx_desc_on_chain(p);
        else
                enh_desc_end_tx_desc_on_ring(p, ter);
}

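/* Fill a TX descriptor for one segment of a frame. The OWN bit is the
 * hardware's cue to start fetching, so when it is set on the first
 * segment a dma_wmb() ensures every other field (and every later
 * descriptor of the frame) is visible to the device beforehand.
 */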
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                     bool csum_flag, int mode, bool tx_own,
                                     bool ls, unsigned int tot_pkt_len)
{
        unsigned int tdes0 = le32_to_cpu(p->des0);

        if (mode == STMMAC_CHAIN_MODE)
                enh_set_tx_desc_len_on_chain(p, len);
        else
                enh_set_tx_desc_len_on_ring(p, len);

        if (is_fs)
                tdes0 |= ETDES0_FIRST_SEGMENT;
        else
                tdes0 &= ~ETDES0_FIRST_SEGMENT;

        if (likely(csum_flag))
                tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
        else
                tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

        if (ls)
                tdes0 |= ETDES0_LAST_SEGMENT;

        /* Finally set the OWN bit. Later the DMA will start! */
        if (tx_own)
                tdes0 |= ETDES0_OWN;

        if (is_fs && tx_own)
                /* When the own bit has to be set for the first descriptor
                 * of a frame, all other descriptors of that frame must have
                 * been set up beforehand, to avoid a race with the DMA.
                 */
                dma_wmb();

        p->des0 = cpu_to_le32(tdes0);
}

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
        unsigned int csum = 0;
        /* The type-1 checksum offload engines append the checksum at
         * the end of the frame, and those two checksum bytes are
         * included in the reported frame length. Adjust the frame
         * length accordingly for type-1 engines.
         */
        if (rx_coe_type == STMMAC_RX_COE_TYPE1)
                csum = 2;

        return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
                                >> RDES0_FRAME_LEN_SHIFT) - csum);
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

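/* Read back a hardware timestamp. With extended descriptors (ats) the
 * snapshot lives in des6/des7, otherwise the hardware writes it over
 * des2/des3; in both cases the low word holds nanoseconds and the high
 * word seconds, combined here into a single u64 nanosecond value.
 */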
static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
        u64 ns;

        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                ns = le32_to_cpu(p->des6);
                /* convert the high/seconds word of the timestamp to ns */
                ns += le32_to_cpu(p->des7) * 1000000000ULL;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                ns = le32_to_cpu(p->des2);
                ns += le32_to_cpu(p->des3) * 1000000000ULL;
        }

        *ts = ns;
}

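/* Check whether an RX timestamp is available. With extended descriptors,
 * RDES0 bit 7 doubles as the "timestamp available" flag per the databook
 * layout this driver assumes (hence the reuse of RDES0_IPC_CSUM_ERROR);
 * with normal descriptors an all-ones des2/des3 pair marks a corrupted
 * snapshot that must not be used.
 */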
static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
                                            u32 ats)
{
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                if ((le32_to_cpu(p->des2) == 0xffffffff) &&
                    (le32_to_cpu(p->des3) == 0xffffffff))
                        /* timestamp is corrupted, hence don't store it */
                        return 0;
                else
                        return 1;
        }
}

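/* Debug helper: dump the four basic words of every extended descriptor
 * in the ring, together with its physical address.
 */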
static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
        int i;

        pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

        for (i = 0; i < size; i++) {
                u64 x;

                x = *(u64 *)ep;
                pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                        i, (unsigned int)virt_to_phys(ep),
                        (unsigned int)x, (unsigned int)(x >> 32),
                        ep->basic.des2, ep->basic.des3);
                ep++;
        }
        pr_info("\n");
}

static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
{
        *addr = le32_to_cpu(p->des2);
}

static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
        p->des2 = cpu_to_le32(addr);
}

static void enh_desc_clear(struct dma_desc *p)
{
        p->des2 = 0;
}

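/* Descriptor callbacks wired into the stmmac core when the GMAC is
 * configured for enhanced descriptors.
 */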
const struct stmmac_desc_ops enh_desc_ops = {
        .tx_status = enh_desc_get_tx_status,
        .rx_status = enh_desc_get_rx_status,
        .get_tx_len = enh_desc_get_tx_len,
        .init_rx_desc = enh_desc_init_rx_desc,
        .init_tx_desc = enh_desc_init_tx_desc,
        .get_tx_owner = enh_desc_get_tx_owner,
        .release_tx_desc = enh_desc_release_tx_desc,
        .prepare_tx_desc = enh_desc_prepare_tx_desc,
        .set_tx_ic = enh_desc_set_tx_ic,
        .get_tx_ls = enh_desc_get_tx_ls,
        .set_tx_owner = enh_desc_set_tx_owner,
        .set_rx_owner = enh_desc_set_rx_owner,
        .get_rx_frame_len = enh_desc_get_rx_frame_len,
        .rx_extended_status = enh_desc_get_ext_status,
        .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
        .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
        .get_timestamp = enh_desc_get_timestamp,
        .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
        .display_ring = enh_desc_display_ring,
        .get_addr = enh_desc_get_addr,
        .set_addr = enh_desc_set_addr,
        .clear = enh_desc_clear,
};