drivers/net/ethernet/stmicro/stmmac/enh_desc.c


DEFINITIONS

This source file includes the following definitions.
  1. enh_desc_get_tx_status
  2. enh_desc_get_tx_len
  3. enh_desc_coe_rdes0
  4. enh_desc_get_ext_status
  5. enh_desc_get_rx_status
  6. enh_desc_init_rx_desc
  7. enh_desc_init_tx_desc
  8. enh_desc_get_tx_owner
  9. enh_desc_set_tx_owner
  10. enh_desc_set_rx_owner
  11. enh_desc_get_tx_ls
  12. enh_desc_release_tx_desc
  13. enh_desc_prepare_tx_desc
  14. enh_desc_set_tx_ic
  15. enh_desc_get_rx_frame_len
  16. enh_desc_enable_tx_timestamp
  17. enh_desc_get_tx_timestamp_status
  18. enh_desc_get_timestamp
  19. enh_desc_get_rx_timestamp_status
  20. enh_desc_display_ring
  21. enh_desc_get_addr
  22. enh_desc_set_addr
  23. enh_desc_clear

// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p, void __iomem *ioaddr)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int tdes0 = le32_to_cpu(p->des0);
        int ret = tx_done;

        /* Get tx owner first */
        if (unlikely(tdes0 & ETDES0_OWN))
                return tx_dma_own;

        /* Verify tx error by looking at the last segment. */
        if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
                return tx_not_ls;

        if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
                if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
                        x->tx_jabber++;

                if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
                        x->tx_frame_flushed++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }

                if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
                             (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
                        stats->collisions +=
                                (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

                if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
                        x->tx_deferred++;

                if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
                        dwmac_dma_flush_tx_fifo(ioaddr);
                        x->tx_underflow++;
                }

                if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
                        x->tx_ip_header_error++;

                if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
                        x->tx_payload_error++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }

                ret = tx_err;
        }

        if (unlikely(tdes0 & ETDES0_DEFERRED))
                x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
        if (tdes0 & ETDES0_VLAN_FRAME)
                x->tx_vlan++;
#endif

        return ret;
}
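
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * TX-clean loop could act on the status codes returned above.  The helper
 * below and its caller are assumptions for illustration only, so it is
 * compiled out.
 */
#if 0
static void example_tx_clean_one(void *stats, struct stmmac_extra_stats *x,
                                 struct dma_desc *p, void __iomem *ioaddr)
{
        int status = enh_desc_get_tx_status(stats, x, p, ioaddr);

        if (status == tx_dma_own)
                return;         /* DMA still owns the descriptor: stop here */
        if (status == tx_not_ls)
                return;         /* intermediate segment: nothing to report */
        /* tx_done or tx_err: the whole frame completed; free the skb and
         * account the errors already folded into stats/x above.
         */
}
#endif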

static int enh_desc_get_tx_len(struct dma_desc *p)
{
        return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
        u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

        /* bits 5 7 0 | Frame status
         * ----------------------------------------------------------
         *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
         *      1 0 0 | IPv4/6 no checksum errors
         *      1 0 1 | IPv4/6 checksum payload error
         *      1 1 0 | IPv4/6 checksum IP header error
         *      1 1 1 | IPv4/6 IP payload and header errors
         *      0 0 1 | IPv4/6 unsupported IP payload
         *      0 1 1 | COE bypassed: not an IPv4/6 frame
         *      0 1 0 | Reserved
         */
        switch (status) {
        case 0x0:
                return llc_snap;
        case 0x5:
        case 0x6:
        case 0x7:
                return csum_none;
        case 0x1:
        case 0x3:
                return discard_frame;
        case 0x4:
        default:
                return good_frame;
        }
}
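
/*
 * Worked example (illustrative): an IPv4 frame whose payload checksum failed
 * but whose IP header is fine has type = 1, ipc_err = 0, payload_err = 1, so
 * status = (1 << 2) | (0 << 1) | 1 = 0x5 and the function returns csum_none,
 * i.e. the stack must verify the checksum in software.
 */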

static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_extended_desc *p)
{
        unsigned int rdes0 = le32_to_cpu(p->basic.des0);
        unsigned int rdes4 = le32_to_cpu(p->des4);

        if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
                int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

                if (rdes4 & ERDES4_IP_HDR_ERR)
                        x->ip_hdr_err++;
                if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
                        x->ip_payload_err++;
                if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
                        x->ip_csum_bypassed++;
                if (rdes4 & ERDES4_IPV4_PKT_RCVD)
                        x->ipv4_pkt_rcvd++;
                if (rdes4 & ERDES4_IPV6_PKT_RCVD)
                        x->ipv6_pkt_rcvd++;

                switch (message_type) {
                case RDES_EXT_NO_PTP:
                        x->no_ptp_rx_msg_type_ext++;
                        break;
                case RDES_EXT_SYNC:
                        x->ptp_rx_msg_type_sync++;
                        break;
                case RDES_EXT_FOLLOW_UP:
                        x->ptp_rx_msg_type_follow_up++;
                        break;
                case RDES_EXT_DELAY_REQ:
                        x->ptp_rx_msg_type_delay_req++;
                        break;
                case RDES_EXT_DELAY_RESP:
                        x->ptp_rx_msg_type_delay_resp++;
                        break;
                case RDES_EXT_PDELAY_REQ:
                        x->ptp_rx_msg_type_pdelay_req++;
                        break;
                case RDES_EXT_PDELAY_RESP:
                        x->ptp_rx_msg_type_pdelay_resp++;
                        break;
                case RDES_EXT_PDELAY_FOLLOW_UP:
                        x->ptp_rx_msg_type_pdelay_follow_up++;
                        break;
                case RDES_PTP_ANNOUNCE:
                        x->ptp_rx_msg_type_announce++;
                        break;
                case RDES_PTP_MANAGEMENT:
                        x->ptp_rx_msg_type_management++;
                        break;
                case RDES_PTP_PKT_RESERVED_TYPE:
                        x->ptp_rx_msg_pkt_reserved_type++;
                        break;
                }

                if (rdes4 & ERDES4_PTP_FRAME_TYPE)
                        x->ptp_frame_type++;
                if (rdes4 & ERDES4_PTP_VER)
                        x->ptp_ver++;
                if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
                        x->timestamp_dropped++;
                if (rdes4 & ERDES4_AV_PKT_RCVD)
                        x->av_pkt_rcvd++;
                if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
                        x->av_tagged_pkt_rcvd++;
                if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
                        x->vlan_tag_priority_val++;
                if (rdes4 & ERDES4_L3_FILTER_MATCH)
                        x->l3_filter_match++;
                if (rdes4 & ERDES4_L4_FILTER_MATCH)
                        x->l4_filter_match++;
                if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
                        x->l3_l4_filter_no_match++;
        }
}

static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int rdes0 = le32_to_cpu(p->des0);
        int ret = good_frame;

        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;

        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
                stats->rx_length_errors++;
                return discard_frame;
        }

        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
                        stats->rx_length_errors++;
                }
                if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
                        x->rx_gmac_overflow++;

                if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
                        pr_err("\tIPC Csum Error/Giant frame\n");

                if (unlikely(rdes0 & RDES0_COLLISION))
                        stats->collisions++;
                if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
                        x->rx_watchdog++;

                if (unlikely(rdes0 & RDES0_MII_ERROR))  /* GMII */
                        x->rx_mii++;

                if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
                        x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }

        /* After a payload csum error, the ES bit is set.
         * This does not match the information reported in the databook.
         * At any rate, we need to understand whether the HW checksum
         * computation is ok and report this info to the upper layers.
         */
        if (likely(ret == good_frame))
                ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
                                         !!(rdes0 & RDES0_FRAME_TYPE),
                                         !!(rdes0 & ERDES0_RX_MAC_ADDR));

        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;

        if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
                x->rx_length++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (rdes0 & RDES0_VLAN_TAG)
                x->rx_vlan++;
#endif

        return ret;
}
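
/*
 * Illustrative sketch (an assumption, not in the original file): how an
 * RX-clean loop might branch on the codes above.  good_frame and csum_none
 * frames go up the stack (with or without CHECKSUM_UNNECESSARY),
 * discard_frame is dropped, and dma_own ends the poll.  Compiled out.
 */
#if 0
static bool example_rx_handle(void *stats, struct stmmac_extra_stats *x,
                              struct dma_desc *p)
{
        int status = enh_desc_get_rx_status(stats, x, p);

        if (status == dma_own)
                return false;   /* descriptor not ready: stop polling */
        if (status == discard_frame)
                return true;    /* drop and recycle the buffer */
        /* good_frame: the hardware validated the checksum; csum_none: let
         * the stack verify the checksum in software.
         */
        return true;
}
#endif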

static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                  int mode, int end, int bfsize)
{
        int bfsize1;

        p->des0 |= cpu_to_le32(RDES0_OWN);

        /* An enhanced descriptor can address at most 8 KiB per buffer, so
         * clamp the programmed buffer 1 size accordingly.
         */
        bfsize1 = min(bfsize, BUF_SIZE_8KiB);
        p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
                ehn_desc_rx_set_on_ring(p, end, bfsize);

        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
        p->des0 &= cpu_to_le32(~ETDES0_OWN);
        if (mode == STMMAC_CHAIN_MODE)
                enh_desc_end_tx_desc_on_chain(p);
        else
                enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
        p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
        int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

        memset(p, 0, offsetof(struct dma_desc, des2));
        if (mode == STMMAC_CHAIN_MODE)
                enh_desc_end_tx_desc_on_chain(p);
        else
                enh_desc_end_tx_desc_on_ring(p, ter);
}

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                     bool csum_flag, int mode, bool tx_own,
                                     bool ls, unsigned int tot_pkt_len)
{
        unsigned int tdes0 = le32_to_cpu(p->des0);

        if (mode == STMMAC_CHAIN_MODE)
                enh_set_tx_desc_len_on_chain(p, len);
        else
                enh_set_tx_desc_len_on_ring(p, len);

        if (is_fs)
                tdes0 |= ETDES0_FIRST_SEGMENT;
        else
                tdes0 &= ~ETDES0_FIRST_SEGMENT;

        if (likely(csum_flag))
                tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
        else
                tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

        if (ls)
                tdes0 |= ETDES0_LAST_SEGMENT;

        /* Finally set the OWN bit. Later the DMA will start! */
        if (tx_own)
                tdes0 |= ETDES0_OWN;

        if (is_fs && tx_own)
                /* When the OWN bit has to be set on the first segment, all
                 * other descriptors of the same frame must already be set
                 * up, to avoid a race with the DMA engine.
                 */
                dma_wmb();

        p->des0 = cpu_to_le32(tdes0);
}
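
/*
 * Why the barrier above matters (restated for illustration): for a frame
 * split across several descriptors, the caller prepares the later segments
 * first and writes the first segment last.  dma_wmb() makes those earlier
 * descriptor writes visible to the device before OWN is set on the first
 * segment, so the DMA engine cannot fetch a half-initialized chain.
 */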

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
        unsigned int csum = 0;
        /* The type-1 checksum offload engines append the checksum at the
         * end of the frame, and the two checksum bytes are included in the
         * reported length.  Adjust the frame length accordingly for type-1
         * engines.
         */
        if (rx_coe_type == STMMAC_RX_COE_TYPE1)
                csum = 2;

        return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
                                >> RDES0_FRAME_LEN_SHIFT) - csum);
}
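
/*
 * Worked example (illustrative): with a type-1 engine, a 64-byte frame is
 * reported with a length of 66 because the 2-byte checksum the engine
 * appends is counted in RDES0; the function above returns 66 - 2 = 64.
 */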

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
        u64 ns;

        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                ns = le32_to_cpu(p->des6);
                /* convert the seconds (high) word of the timestamp to
                 * nanoseconds
                 */
                ns += le32_to_cpu(p->des7) * 1000000000ULL;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                ns = le32_to_cpu(p->des2);
                ns += le32_to_cpu(p->des3) * 1000000000ULL;
        }

        *ts = ns;
}
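
/*
 * Worked example (illustrative): with a seconds field of 5 and a nanoseconds
 * field of 1000, the value stored through *ts is
 * 5 * 1000000000 + 1000 = 5000001000 ns.
 */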

static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
                                            u32 ats)
{
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                if ((le32_to_cpu(p->des2) == 0xffffffff) &&
                    (le32_to_cpu(p->des3) == 0xffffffff))
                        /* timestamp is corrupted, hence don't store it */
                        return 0;
                else
                        return 1;
        }
}

static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
        unsigned int i;

        pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

        for (i = 0; i < size; i++) {
                u64 x;

                x = *(u64 *)ep;
                pr_info("%03u [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                        i, (unsigned int)virt_to_phys(ep),
                        (unsigned int)x, (unsigned int)(x >> 32),
                        ep->basic.des2, ep->basic.des3);
                ep++;
        }
        pr_info("\n");
}

static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
{
        *addr = le32_to_cpu(p->des2);
}

static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
        p->des2 = cpu_to_le32(addr);
}

static void enh_desc_clear(struct dma_desc *p)
{
        p->des2 = 0;
}

const struct stmmac_desc_ops enh_desc_ops = {
        .tx_status = enh_desc_get_tx_status,
        .rx_status = enh_desc_get_rx_status,
        .get_tx_len = enh_desc_get_tx_len,
        .init_rx_desc = enh_desc_init_rx_desc,
        .init_tx_desc = enh_desc_init_tx_desc,
        .get_tx_owner = enh_desc_get_tx_owner,
        .release_tx_desc = enh_desc_release_tx_desc,
        .prepare_tx_desc = enh_desc_prepare_tx_desc,
        .set_tx_ic = enh_desc_set_tx_ic,
        .get_tx_ls = enh_desc_get_tx_ls,
        .set_tx_owner = enh_desc_set_tx_owner,
        .set_rx_owner = enh_desc_set_rx_owner,
        .get_rx_frame_len = enh_desc_get_rx_frame_len,
        .rx_extended_status = enh_desc_get_ext_status,
        .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
        .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
        .get_timestamp = enh_desc_get_timestamp,
        .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
        .display_ring = enh_desc_display_ring,
        .get_addr = enh_desc_get_addr,
        .set_addr = enh_desc_set_addr,
        .clear = enh_desc_clear,
};
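
/*
 * Illustrative sketch (assumption, compiled out): these helpers are not
 * called directly; the stmmac core dispatches through the ops table above.
 * The function below and its names are hypothetical.
 */
#if 0
static void example_use_desc_ops(const struct stmmac_desc_ops *ops,
                                 struct dma_desc *p, dma_addr_t buf)
{
        ops->set_addr(p, buf);          /* point the descriptor at the buffer */
        ops->set_tx_owner(p);           /* then hand it to the DMA engine */
}
#endif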
