root/drivers/soc/fsl/dpio/qbman-portal.h

DEFINITIONS

This source file includes the following definitions.
  1. qbman_result_is_DQ
  2. qbman_result_is_SCN
  3. qbman_result_is_FQDAN
  4. qbman_result_is_CDAN
  5. qbman_result_is_CSCN
  6. qbman_result_is_BPSCN
  7. qbman_result_is_CGCU
  8. qbman_result_is_FQRN
  9. qbman_result_is_FQRNI
  10. qbman_result_is_FQPN
  11. qbman_result_SCN_state
  12. qbman_result_SCN_rid
  13. qbman_result_SCN_ctx
  14. qbman_swp_fq_schedule
  15. qbman_swp_fq_force
  16. qbman_swp_fq_xon
  17. qbman_swp_fq_xoff
  18. qbman_swp_CDAN_set_context
  19. qbman_swp_CDAN_enable
  20. qbman_swp_CDAN_disable
  21. qbman_swp_CDAN_set_context_enable
  22. qbman_swp_mc_complete

/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
        void *cena_bar; /* Cache-enabled portal base address */
        void __iomem *cinh_bar; /* Cache-inhibited portal base address */
        u32 qman_version;
};

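/*
 * Example: a minimal portal bring-up sketch (hypothetical caller code;
 * cena_base, cinh_base and qman_rev are caller-provided values, not part
 * of this API):
 *
 *        struct qbman_swp_desc pd;
 *        struct qbman_swp *swp;
 *
 *        pd.cena_bar = cena_base;        // cache-enabled mapping
 *        pd.cinh_bar = cinh_base;        // cache-inhibited mapping
 *        pd.qman_version = qman_rev;     // e.g. read from DPIO attributes
 *        swp = qbman_swp_init(&pd);
 *        if (!swp)
 *                return -ENODEV;
 */
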
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
        u8 verb;
        u8 numf;
        u8 tok;
        u8 reserved;
        __le32 dq_src;
        __le64 rsp_addr;
        u64 rsp_addr_virt;
        u8 padding[40];
};

enum qbman_pull_type_e {
        /* dequeue with priority precedence, respect intra-class scheduling */
        qbman_pull_type_prio = 1,
        /* dequeue with active FQ precedence, respect ICS */
        qbman_pull_type_active,
        /* dequeue with active FQ precedence, no ICS */
        qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
        u8 verb;
        u8 dca;
        __le16 seqnum;
        __le16 orpid;
        __le16 reserved1;
        __le32 tgtid;
        __le32 tag;
        __le16 qdbin;
        u8 qpri;
        u8 reserved[3];
        u8 wae;
        u8 rspid;
        __le64 rsp_addr;
        u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        __le32 reserved2;
        __le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
        const struct qbman_swp_desc *desc;
        void *addr_cena;
        void __iomem *addr_cinh;

        /* Management commands */
        struct {
                u32 valid_bit; /* 0x00 or 0x80 */
        } mc;

        /* Management response */
        struct {
                u32 valid_bit; /* 0x00 or 0x80 */
        } mr;

        /* Push dequeues */
        u32 sdq;

        /* Volatile dequeues */
        struct {
                atomic_t available; /* indicates if a command can be sent */
                u32 valid_bit; /* 0x00 or 0x80 */
                struct dpaa2_dq *storage; /* NULL if DQRR */
        } vdq;

        /* DQRR */
        struct {
                u32 next_idx;
                u32 valid_bit;
                u8 dqrr_size;
                int reset_bug; /* indicates dqrr reset workaround is needed */
        } dqrr;
};

struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

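/*
 * Example: a hedged interrupt-service sketch built on the accessors above
 * (hypothetical caller code; a real handler would defer the dequeue work,
 * e.g. to a NAPI context, before clearing the status):
 *
 *        u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *        if (!status)
 *                return IRQ_NONE;
 *        if (status & QBMAN_SWP_INTERRUPT_DQRI)
 *                schedule_dequeue_work();        // hypothetical helper
 *        qbman_swp_interrupt_clear_status(swp, status);
 *        return IRQ_HANDLED;
 */
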
void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct);

int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

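/*
 * Example: a push-mode DQRR processing loop (a sketch; assumes push
 * dequeuing was enabled with qbman_swp_push_set(), and process_fd()/
 * process_notification() are hypothetical helpers). Every entry returned
 * by qbman_swp_dqrr_next() must be consumed exactly once:
 *
 *        const struct dpaa2_dq *dq;
 *
 *        while ((dq = qbman_swp_dqrr_next(swp))) {
 *                if (qbman_result_is_DQ(dq))
 *                        process_fd(dq);
 *                else
 *                        process_notification(dq);
 *                qbman_swp_dqrr_consume(swp, dq);
 *        }
 */
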
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

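/*
 * Example: a volatile (pull) dequeue sketch; storage/storage_phys are
 * assumed to be a caller-mapped DMA-coherent buffer of struct dpaa2_dq
 * entries, polled for completion with qbman_result_has_new_result():
 *
 *        struct qbman_pull_desc pd;
 *
 *        qbman_pull_desc_clear(&pd);
 *        qbman_pull_desc_set_numframes(&pd, 8);
 *        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *        qbman_pull_desc_set_fq(&pd, fqid);
 *        if (qbman_swp_pull(swp, &pd))
 *                return -EBUSY;  // previous volatile dequeue still in flight
 *        while (!qbman_result_has_new_result(swp, storage))
 *                cpu_relax();
 */
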
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio);

int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
                      const struct dpaa2_fd *fd);

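/*
 * Example: a minimal enqueue sketch (hypothetical caller code; fd is a
 * struct dpaa2_fd already filled in by the caller):
 *
 *        struct qbman_eq_desc ed;
 *
 *        qbman_eq_desc_clear(&ed);
 *        qbman_eq_desc_set_no_orp(&ed, 0);     // no order restoration
 *        qbman_eq_desc_set_fq(&ed, fqid);      // or qbman_eq_desc_set_qd()
 *        if (qbman_swp_enqueue(swp, &ed, &fd))
 *                return -EBUSY;  // EQCR full; the caller may retry
 */
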
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
                      unsigned int num_buffers);
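
/*
 * Example: releasing buffers to, and re-acquiring them from, a buffer
 * pool (a sketch; bufs[] holds DMA addresses provided by the caller, and
 * qbman_swp_acquire() is assumed to return the number actually acquired):
 *
 *        struct qbman_release_desc rd;
 *
 *        qbman_release_desc_clear(&rd);
 *        qbman_release_desc_set_bpid(&rd, bpid);
 *        if (qbman_swp_release(swp, &rd, bufs, 7))
 *                return -EBUSY;  // release ring full; retry later
 *        ...
 *        ret = qbman_swp_acquire(swp, bpid, bufs, 7);
 */
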
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
                           u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
                       u8 we_mask, u8 cdan_en,
                       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications.
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
        return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

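/*
 * Example: dispatching a non-dequeue entry with the predicates above (a
 * sketch; the handle_*() functions are hypothetical):
 *
 *        if (qbman_result_is_CDAN(dq))
 *                handle_cdan(dq);
 *        else if (qbman_result_is_CSCN(dq))
 *                handle_congestion_change(dq);
 *        else if (qbman_result_is_BPSCN(dq))
 *                handle_buffer_pool_change(dq);
 *        else if (qbman_result_is_FQRN(dq) || qbman_result_is_FQRNI(dq))
 *                handle_fq_retirement(dq);
 */
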
/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
        return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
        return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
        return le64_to_cpu(scn->scn.ctx);
}

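/*
 * Example: decoding a state-change notification, e.g. a BPSCN, with the
 * accessors above (a sketch; for a BPSCN the resource id is the buffer
 * pool id, and the meaning of the state bits is defined by the QBMan
 * reference manual):
 *
 *        u8 state = qbman_result_SCN_state(scn);
 *        u32 bpid = qbman_result_SCN_rid(scn);
 *        u64 ctx = qbman_result_SCN_ctx(scn);
 */
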
/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

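/*
 * Example: temporarily stopping and later restarting dequeues from a FQ
 * (a sketch; enqueues keep working while the FQ is XOFF):
 *
 *        if (qbman_swp_fq_xoff(swp, fqid))
 *                return -EIO;
 *        ...
 *        if (qbman_swp_fq_xon(swp, fqid))
 *                return -EIO;
 */
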
/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */

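/*
 * Example: the reaction sequence described above, as a sketch
 * (hypothetical caller code; pull_and_process() stands in for the
 * caller's pull-dequeue logic). On a CDAN, drain the channel, then rearm
 * it, optionally refreshing the context in the same hardware command:
 *
 *        if (qbman_result_is_CDAN(dq)) {
 *                pull_and_process(swp, ch_id);
 *                qbman_swp_CDAN_set_context_enable(swp, ch_id, new_ctx);
 *        }
 */
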
/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
                                             u64 ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
                                                    u16 channelid,
                                                    u64 ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
                                          u8 cmd_verb)
{
        int loopvar = 2000;

        qbman_swp_mc_submit(swp, cmd, cmd_verb);

        do {
                cmd = qbman_swp_mc_result(swp);
        } while (!cmd && loopvar--);

        WARN_ON(!loopvar);

        return cmd;
}

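/*
 * Example: the usual management-command pattern built on the three
 * primitives above (a sketch; struct mc_rslt and the command layout are
 * hypothetical and depend on the specific command):
 *
 *        struct mc_rslt *r;
 *        void *cmd;
 *
 *        cmd = qbman_swp_mc_start(swp);
 *        if (!cmd)
 *                return -EBUSY;
 *        // ... fill in the command-specific fields ...
 *        r = qbman_swp_mc_complete(swp, cmd, cmd_verb);
 *        if (!r)
 *                return -EIO;    // no response within the polling budget
 *        if (r->rslt != QBMAN_MC_RSLT_OK)
 *                return -EIO;
 */
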
/* Query APIs */
struct qbman_fq_query_np_rslt {
        u8 verb;
        u8 rslt;
        u8 st1;
        u8 st2;
        u8 reserved[2];
        __le16 od1_sfdr;
        __le16 od2_sfdr;
        __le16 od3_sfdr;
        __le16 ra1_sfdr;
        __le16 ra2_sfdr;
        __le32 pfdr_hptr;
        __le32 pfdr_tptr;
        __le32 frm_cnt;
        __le32 byte_cnt;
        __le16 ics_surp;
        u8 is;
        u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
                         struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);

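/*
 * Example: querying the occupancy of a FQ (a sketch):
 *
 *        struct qbman_fq_query_np_rslt state;
 *        u32 frames, bytes;
 *
 *        if (qbman_fq_query_state(swp, fqid, &state))
 *                return -EIO;
 *        frames = qbman_fq_state_frame_count(&state);
 *        bytes = qbman_fq_state_byte_count(&state);
 */
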
struct qbman_bp_query_rslt {
        u8 verb;
        u8 rslt;
        u8 reserved[4];
        u8 bdi;
        u8 state;
        __le32 fill;
        __le32 hdotr;
        __le16 swdet;
        __le16 swdxt;
        __le16 hwdet;
        __le16 hwdxt;
        __le16 swset;
        __le16 swsxt;
        __le16 vbpid;
        __le16 icid;
        __le64 bpscn_addr;
        __le64 bpscn_ctx;
        __le16 hw_targ;
        u8 dbe;
        u8 reserved2;
        u8 sdcnt;
        u8 hdcnt;
        u8 sscnt;
        u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
                   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);

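/*
 * Example: checking how many free buffers a pool currently holds (a
 * sketch):
 *
 *        struct qbman_bp_query_rslt bp;
 *        u32 num_free;
 *
 *        if (qbman_bp_query(swp, bpid, &bp))
 *                return -EIO;
 *        num_free = qbman_bp_info_num_free_bufs(&bp);
 */
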
#endif /* __FSL_QBMAN_PORTAL_H */
