drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h


DEFINITIONS

This source file includes the following definitions:
  1. mlx5e_xdp_tx_enable
  2. mlx5e_xdp_tx_disable
  3. mlx5e_xdp_tx_is_enabled
  4. mlx5e_xdp_is_active
  5. mlx5e_xmit_xdp_doorbell
  6. mlx5e_xdp_update_inline_state
  7. mlx5e_xdp_no_room_for_inline_pkt
  8. mlx5e_fill_xdpsq_frag_edge
  9. mlx5e_xdp_mpwqe_add_dseg
  10. mlx5e_xdpsq_fetch_wqe
  11. mlx5e_xdpi_fifo_push
  12. mlx5e_xdpi_fifo_pop

/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include "en.h"
#include "en/txrx.h"

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
        (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
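
/* Worked out (assuming the usual 16-byte ctrl and eth segments of
 * struct mlx5e_tx_wqe): the empty WQE spans 32 / 16 == 2 DS, so a
 * plain XDP TX WQE takes 3 DS: ctrl + eth + one scatter/gather entry.
 */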

#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)

#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
        DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
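
/* Worked out (assuming the 4-byte byte_count header of
 * struct mlx5_wqe_inline_seg): the threshold is 256 - 4 == 252 bytes,
 * so a worst-case inline packet occupies DIV_ROUND_UP(252, 16) == 16
 * data segments.
 */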

/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_XDP_MPW_MAX_NUM_DS \
        (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
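
/* Worked out from the constants above: with 64-byte cachelines the
 * bound is 16 - 1 == 15 WQEBBs, i.e. 15 * 4 == 60 DS; with 128-byte
 * cachelines it is 16 - 2 == 14 WQEBBs, i.e. 56 DS. Both fit the
 * 6-bit DS field (maximum 63).
 */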

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      void *va, u16 *rx_headroom, u32 *len, bool xsk);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags);

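/* Two state bits cooperate below: MLX5E_STATE_XDP_TX_ENABLED is set
 * whenever the XDP SQs may be used (e.g. as an XDP_REDIRECT target),
 * while MLX5E_STATE_XDP_ACTIVE is additionally set only when an XDP
 * program is attached to the channels.
 */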
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);

        if (priv->channels.params.xdp_prog)
                set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
        if (priv->channels.params.xdp_prog)
                clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);

        clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
        /* Let other devices' NAPIs and XSK wakeups see our new state. */
        synchronize_rcu();
}

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
        return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
        return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
        if (sq->doorbell_cseg) {
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
                sq->doorbell_cseg = NULL;
        }
}
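
/* A minimal usage sketch (mlx5e_xdp_example_flush is a hypothetical
 * helper, not part of the driver): transmit paths stash the ctrl
 * segment of the last posted WQE in sq->doorbell_cseg, so the doorbell
 * is rung once per burst instead of once per packet.
 */
static inline void mlx5e_xdp_example_flush(struct mlx5e_xdpsq *sq)
{
        /* ... post one or more WQEs, each setting sq->doorbell_cseg ... */
        mlx5e_xmit_xdp_doorbell(sq); /* single doorbell write for the burst */
}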

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested CPU (SW). The low/high watermarks below add
 * hysteresis, so the inline mode does not flap around a single
 * threshold.
 */
static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
{
        u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
        struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;

#define MLX5E_XDP_INLINE_WATERMARK_LOW  10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

        if (session->inline_on) {
                if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
                        session->inline_on = 0;
                return;
        }

        /* inline is off */
        if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
                session->inline_on = 1;
}

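/* True when the session copies packets inline but no longer has room
 * for a worst-case (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT) inline packet.
 */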
static inline bool
mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
{
        return session->inline_on &&
               session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}

static inline void
mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
                           u16 pi, u16 nnops)
{
        struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

        edge_wi = wi + nnops;
        /* Fill the SQ fragment edge with NOPs to avoid a WQE wrapping
         * across two pages.
         */
        for (; wi < edge_wi; wi++) {
                wi->num_wqebbs = 1;
                wi->num_pkts   = 0;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }

        sq->stats->nops += nnops;
}

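/* Append one packet to the open MPWQE session: small packets are
 * copied inline when inline mode is on; otherwise a gather data
 * segment pointing at the packet's DMA address is added.
 */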
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
                         struct mlx5e_xdp_xmit_data *xdptxd,
                         struct mlx5e_xdpsq_stats *stats)
{
        struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
        struct mlx5_wqe_data_seg *dseg =
                (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
        u32 dma_len = xdptxd->len;

        session->pkt_count++;

        if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
                struct mlx5_wqe_inline_seg *inline_dseg =
                        (struct mlx5_wqe_inline_seg *)dseg;
                u16 ds_len = sizeof(*inline_dseg) + dma_len;
                u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

                inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
                memcpy(inline_dseg->data, xdptxd->data, dma_len);

                session->ds_count += ds_cnt;
                stats->inlnw++;
                return;
        }

        dseg->addr       = cpu_to_be64(xdptxd->dma_addr);
        dseg->byte_count = cpu_to_be32(dma_len);
        dseg->lkey       = sq->mkey_be;
        session->ds_count++;
}

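/* Translate the producer counter into a WQ index and hand back a
 * zeroed WQE for the caller to fill in.
 */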
static inline struct mlx5e_tx_wqe *
mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_wqe *wqe;

        *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
        memset(wqe, 0, sizeof(*wqe));

        return wqe;
}

static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
                     struct mlx5e_xdp_info *xi)
{
        u32 i = (*fifo->pc)++ & fifo->mask;

        fifo->xi[i] = *xi;
}

static inline struct mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
        return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
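
/* A minimal usage sketch (mlx5e_xdpi_fifo_example is a hypothetical
 * helper, not part of the driver): the FIFO carries per-frame
 * completion metadata from the xmit path to the CQ polling path in
 * submission order, so each pop on completion matches the push made
 * at transmit time.
 */
static inline void mlx5e_xdpi_fifo_example(struct mlx5e_xdp_info_fifo *fifo)
{
        struct mlx5e_xdp_info xi = {}; /* filled at transmit time */

        mlx5e_xdpi_fifo_push(fifo, &xi); /* producer: on transmit */
        xi = mlx5e_xdpi_fifo_pop(fifo);  /* consumer: on completion */
        (void)xi; /* a real caller would unmap/free based on xi */
}
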
#endif /* __MLX5_EN_XDP_H__ */
