This source file includes following definitions.
- qm_in
- qm_out
- qm_cl_invalidate
- qm_cl_touch_ro
- qm_ce_in
- eqcr_carryclear
- eqcr_ptr2idx
- eqcr_inc
- qm_eqcr_init
- qm_eqcr_get_ci_stashing
- qm_eqcr_finish
- qm_eqcr_start_no_stash
- qm_eqcr_start_stash
- eqcr_commit_checks
- qm_eqcr_pvb_commit
- qm_eqcr_cce_prefetch
- qm_eqcr_cce_update
- qm_eqcr_set_ithresh
- qm_eqcr_get_avail
- qm_eqcr_get_fill
- dqrr_carryclear
- dqrr_ptr2idx
- dqrr_inc
- qm_dqrr_set_maxfill
- qm_dqrr_init
- qm_dqrr_finish
- qm_dqrr_current
- qm_dqrr_next
- qm_dqrr_pvb_update
- qm_dqrr_cdc_consume_1ptr
- qm_dqrr_cdc_consume_n
- qm_dqrr_sdqcr_set
- qm_dqrr_vdqcr_set
- qm_dqrr_set_ithresh
- mr_carryclear
- mr_ptr2idx
- mr_inc
- qm_mr_init
- qm_mr_finish
- qm_mr_current
- qm_mr_next
- qm_mr_pvb_update
- qm_mr_cci_consume
- qm_mr_cci_consume_to_current
- qm_mr_set_ithresh
- qm_mc_init
- qm_mc_finish
- qm_mc_start
- qm_mc_commit
- qm_mc_result_timeout
- fq_set
- fq_clear
- fq_isset
- fq_isclear
- get_affine_portal
- put_affine_portal
- get_portal_for_channel
- qman_dqrr_set_ithresh
- qman_dqrr_get_ithresh
- qman_portal_get_iperiod
- qman_portal_set_iperiod
- qman_wq_alloc
- qman_enable_irqs
- qman_alloc_fq_table
- idx_to_fq
- fqid_to_fq
- tag_to_fq
- fq_to_tag
- portal_isr
- drain_mr_fqrni
- qman_create_portal
- qman_create_affine_portal
- qman_destroy_portal
- qman_destroy_affine_portal
- fq_state_change
- qm_congestion_task
- qm_mr_process_task
- __poll_portal_slow
- clear_vdqcr
- __poll_portal_fast
- qman_p_irqsource_add
- qman_p_irqsource_remove
- qman_affine_cpus
- qman_affine_channel
- qman_get_affine_portal
- qman_p_poll_dqrr
- qman_p_static_dequeue_add
- mcr_result_str
- qman_create_fq
- qman_destroy_fq
- qman_fq_fqid
- qman_init_fq
- qman_schedule_fq
- qman_retire_fq
- qman_oos_fq
- qman_query_fq
- qman_query_fq_np
- qman_query_cgr
- qman_query_cgr_congested
- set_p_vdqcr
- set_vdqcr
- wait_vdqcr_start
- qman_volatile_dequeue
- update_eqcr_ci
- qman_enqueue
- qm_modify_cgr
- qm_cgr_cscn_targ_set
- qm_cgr_cscn_targ_clear
- qman_init_cgr_all
- qman_create_cgr
- qman_delete_cgr
- qman_delete_cgr_smp_call
- qman_delete_cgr_safe
- _qm_mr_consume_and_match_verb
- _qm_dqrr_consume_and_match
- qman_shutdown_fq
- qman_get_qm_portal_config
- qman_alloc_range
- qman_alloc_fqid_range
- qman_alloc_pool_range
- qman_alloc_cgrid_range
- qman_release_fqid
- qpool_cleanup
- qman_release_pool
- cgr_cleanup
- qman_release_cgrid
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 
  23 
  24 
  25 
  26 
  27 
  28 
  29 
  30 
  31 #include "qman_priv.h"
  32 
#define DQRR_MAXFILL    15      /* <= 0x0f — maximum fill level for a 16-deep DQRR */
#define EQCR_ITHRESH    4       /* EQCR interrupt threshold (entries) */
#define IRQNAME         "QMan portal %d"
#define MAX_IRQNAME     16      /* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT 32
#define QMAN_PIRQ_DQRR_ITHRESH 12
#define QMAN_DQRR_IT_MAX 15
#define QMAN_ITP_MAX 0xFFF
#define QMAN_PIRQ_MR_ITHRESH 4
#define QMAN_PIRQ_IPERIOD 100
  43 
/*
 * Portal register offsets. Two layouts exist: the ARM/ARM64 (QBMan rev >= 3)
 * layout places the cache-inhibited registers at 0x3000+, while the PPC
 * layout uses low offsets. NOTE(review): CINH/CENA suffixes presumably mean
 * cache-inhibited vs cache-enabled access regions — confirm against the
 * QMan block guide.
 */
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH     0x3000
#define QM_REG_EQCR_CI_CINH     0x3040
#define QM_REG_EQCR_ITR         0x3080
#define QM_REG_DQRR_PI_CINH     0x3100
#define QM_REG_DQRR_CI_CINH     0x3140
#define QM_REG_DQRR_ITR         0x3180
#define QM_REG_DQRR_DCAP        0x31C0
#define QM_REG_DQRR_SDQCR       0x3200
#define QM_REG_DQRR_VDQCR       0x3240
#define QM_REG_DQRR_PDQCR       0x3280
#define QM_REG_MR_PI_CINH       0x3300
#define QM_REG_MR_CI_CINH       0x3340
#define QM_REG_MR_ITR           0x3380
#define QM_REG_CFG              0x3500
#define QM_REG_ISR              0x3600
#define QM_REG_IER              0x3640
#define QM_REG_ISDR             0x3680
#define QM_REG_IIR              0x36C0
#define QM_REG_ITPR             0x3740

/* Cache-enabled register offsets */
#define QM_CL_EQCR              0x0000
#define QM_CL_DQRR              0x1000
#define QM_CL_MR                0x2000
#define QM_CL_EQCR_PI_CENA      0x3000
#define QM_CL_EQCR_CI_CENA      0x3040
#define QM_CL_DQRR_PI_CENA      0x3100
#define QM_CL_DQRR_CI_CENA      0x3140
#define QM_CL_MR_PI_CENA        0x3300
#define QM_CL_MR_CI_CENA        0x3340
#define QM_CL_CR                0x3800
#define QM_CL_RR0               0x3900
#define QM_CL_RR1               0x3940

#else

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH     0x0000
#define QM_REG_EQCR_CI_CINH     0x0004
#define QM_REG_EQCR_ITR         0x0008
#define QM_REG_DQRR_PI_CINH     0x0040
#define QM_REG_DQRR_CI_CINH     0x0044
#define QM_REG_DQRR_ITR         0x0048
#define QM_REG_DQRR_DCAP        0x0050
#define QM_REG_DQRR_SDQCR       0x0054
#define QM_REG_DQRR_VDQCR       0x0058
#define QM_REG_DQRR_PDQCR       0x005c
#define QM_REG_MR_PI_CINH       0x0080
#define QM_REG_MR_CI_CINH       0x0084
#define QM_REG_MR_ITR           0x0088
#define QM_REG_CFG              0x0100
#define QM_REG_ISR              0x0e00
#define QM_REG_IER              0x0e04
#define QM_REG_ISDR             0x0e08
#define QM_REG_IIR              0x0e0c
#define QM_REG_ITPR             0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR              0x0000
#define QM_CL_DQRR              0x1000
#define QM_CL_MR                0x2000
#define QM_CL_EQCR_PI_CENA      0x3000
#define QM_CL_EQCR_CI_CENA      0x3100
#define QM_CL_DQRR_PI_CENA      0x3200
#define QM_CL_DQRR_CI_CENA      0x3300
#define QM_CL_MR_PI_CENA        0x3400
#define QM_CL_MR_CI_CENA        0x3500
#define QM_CL_CR                0x3800
#define QM_CL_RR0               0x3900
#define QM_CL_RR1               0x3940
#endif

/* Address of the cacheline (64 B, hence << 6) holding ring entry 'idx' */
#define qm_cl(base, idx)        ((void *)base + ((idx) << 6))
 129 
 130 
 131 
 132 
 133 
 134 
 135 
 136 
 137 
 138 
 139 
 140 
 141 
 142 
 143 
/*
 * Ring operating modes. NOTE(review): the pci/pce/pvb and cci/cce/cdc
 * expansions below are inferred from the register names used by the
 * corresponding qm_eqcr_*/qm_dqrr_*/qm_mr_* helpers — confirm against the
 * QMan block guide.
 */
enum qm_eqcr_pmode {            /* how EQCR production is signalled */
        qm_eqcr_pci = 0,        /* PI index, cache-inhibited register */
        qm_eqcr_pce = 1,        /* PI index, cache-enabled */
        qm_eqcr_pvb = 2         /* valid-bit written into the entry */
};
enum qm_dqrr_dmode {            /* dequeue delivery mode */
        qm_dqrr_dpush = 0,      /* hardware pushes entries */
        qm_dqrr_dpull = 1       /* software pulls entries */
};
enum qm_dqrr_pmode {            /* how DQRR production is detected */
        qm_dqrr_pci,            /* PI index, cache-inhibited register */
        qm_dqrr_pce,            /* PI index, cache-enabled */
        qm_dqrr_pvb             /* valid-bit in the entry */
};
enum qm_dqrr_cmode {            /* how DQRR consumption is signalled */
        qm_dqrr_cci = 0,        /* CI index, cache-inhibited register */
        qm_dqrr_cce = 1,        /* CI index, cache-enabled */
        qm_dqrr_cdc = 2         /* discrete consumption ack via DCAP */
};
enum qm_mr_pmode {              /* how MR production is detected */
        qm_mr_pci,              /* PI index, cache-inhibited register */
        qm_mr_pce,              /* PI index, cache-enabled */
        qm_mr_pvb               /* valid-bit in the entry */
};
enum qm_mr_cmode {              /* how MR consumption is signalled */
        qm_mr_cci = 0,          /* CI index, cache-inhibited register */
        qm_mr_cce = 1           /* CI index, cache-enabled */
};

/* Ring depths (entries); all sizes are powers of two */
#define QM_EQCR_SIZE            8
#define QM_DQRR_SIZE            16
#define QM_MR_SIZE              8
 178 
 179 
/* "Enqueue Command" ring entry: one 64-byte cacheline */
struct qm_eqcr_entry {
        u8 _ncw_verb; /* writes to this are non-coherent; written last */
        u8 dca;
        __be16 seqnum;
        u8 __reserved[4];
        __be32 fqid;    /* 24-bit FQID */
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT               0x80
#define QM_EQCR_VERB_CMD_MASK           0x61    /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE        0x01
#define QM_EQCR_SEQNUM_NESN             0x8000  /* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS             0x4000  /* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK          0x3fff  /* sequence number goes here */
 196 
/* Software-tracked state of the EQCR (enqueue command ring) */
struct qm_eqcr {
        struct qm_eqcr_entry *ring, *cursor;
        u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        u32 busy;               /* debug: non-zero between start and commit */
        enum qm_eqcr_pmode pmode;
#endif
};

/* Software-tracked state of the DQRR (dequeue response ring) */
struct qm_dqrr {
        const struct qm_dqrr_entry *ring, *cursor;
        u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum qm_dqrr_dmode dmode;
        enum qm_dqrr_pmode pmode;
        enum qm_dqrr_cmode cmode;
#endif
};

/* Software-tracked state of the MR (message ring) */
struct qm_mr {
        union qm_mr_entry *ring, *cursor;
        u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum qm_mr_pmode pmode;
        enum qm_mr_cmode cmode;
#endif
};
 224 
 225 
 226 
/* MC (Management Command) command layouts */

/* "FQ" command layout — used by QUERYFQ/QUERYFQ_NP/ALTER_* verbs */
struct qm_mcc_fq {
        u8 _ncw_verb;
        u8 __reserved1[3];
        __be32 fqid;    /* 24-bit */
        u8 __reserved2[56];
} __packed;

/* "CGR" command layout — used by CGR query/modify verbs */
struct qm_mcc_cgr {
        u8 _ncw_verb;
        u8 __reserved1[30];
        u8 cgid;
        u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT                0x80
#define QM_MCC_VERB_MASK                0x7f    /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED       0x40
#define QM_MCC_VERB_INITFQ_SCHED        0x41
#define QM_MCC_VERB_QUERYFQ             0x44
#define QM_MCC_VERB_QUERYFQ_NP          0x45    /* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ             0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED   0x47
#define QM_MCC_VERB_ALTER_SCHED         0x48    /* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE            0x49    /* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE        0x4a    /* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS           0x4b    /* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON         0x4d    /* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF        0x4e    /* FQ XOFF */
#define QM_MCC_VERB_INITCGR             0x50
#define QM_MCC_VERB_MODIFYCGR           0x51
#define QM_MCC_VERB_CGRTESTWRITE        0x52
#define QM_MCC_VERB_QUERYCGR            0x58
#define QM_MCC_VERB_QUERYCONGESTION     0x59
union qm_mc_command {
        struct {
                u8 _ncw_verb; /* writes to this are non-coherent; written last */
                u8 __reserved[63];
        };
        struct qm_mcc_initfq initfq;
        struct qm_mcc_initcgr initcgr;
        struct qm_mcc_fq fq;
        struct qm_mcc_cgr cgr;
};
 271 
 272 
 273 
/* MC (Management Command) result layouts */

/* "Query FQ" result */
struct qm_mcr_queryfq {
        u8 verb;
        u8 result;
        u8 __reserved1[8];
        struct qm_fqd fqd;      /* the FQD fields are here */
        u8 __reserved2[30];
} __packed;

/* "Alter FQ State" result */
struct qm_mcr_alterfq {
        u8 verb;
        u8 result;
        u8 fqs;         /* Frame Queue Status */
        u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID                0x80
#define QM_MCR_VERB_MASK                QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED       QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED        QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ             QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP          QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ             QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED   QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED         QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE            QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE        QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS           QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL              0x00
#define QM_MCR_RESULT_OK                0xf0
#define QM_MCR_RESULT_ERR_FQID          0xf1
#define QM_MCR_RESULT_ERR_FQSTATE       0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY      0xf3    /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL    0xf4
#define QM_MCR_RESULT_PENDING           0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND    0xff
#define QM_MCR_FQS_ORLPRESENT           0x02    /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY             0x01    /* FQ contains "dequeueable" frames */
#define QM_MCR_TIMEOUT                  10000   /* us */
union qm_mc_result {
        struct {
                u8 verb;
                u8 result;
                u8 __reserved1[62];
        };
        struct qm_mcr_queryfq queryfq;
        struct qm_mcr_alterfq alterfq;
        struct qm_mcr_querycgr querycgr;
        struct qm_mcr_querycongestion querycongestion;
        struct qm_mcr_querywq querywq;
        struct qm_mcr_queryfq_np queryfq_np;
};
 325 
/* Software-tracked state of the MC (management command) interface */
struct qm_mc {
        union qm_mc_command *cr;
        union qm_mc_result *rr; /* base of the RR0/RR1 pair */
        u8 rridx, vbit;         /* which of RR0/RR1 carries the next result */
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum {
                /* Can be _mc_start()ed */
                qman_mc_idle,
                /* Can be _mc_commit()ed or _mc_abort()ed */
                qman_mc_user,
                /* Can only be _mc_retry()ed */
                qman_mc_hw
        } state;
#endif
};

/* Per-portal register space, in its two mapping flavours */
struct qm_addr {
        void *ce;               /* cache-enabled */
        __be32 *ce_be;          /* same value as above but for direct access */
        void __iomem *ci;       /* cache-inhibited */
};

struct qm_portal {
        /*
         * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up
         * to and including 'mc' fits in a cacheline (yay!). The 'config'
         * and messaging state that follows it in the full driver does not
         * need to be that fast.
         */
        struct qm_addr addr;
        struct qm_eqcr eqcr;
        struct qm_dqrr dqrr;
        struct qm_mr mr;
        struct qm_mc mc;
} ____cacheline_aligned;
 361 
 362 
/* Big-endian 32-bit read of a portal register via the cache-inhibited map */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}
 367 
/* Big-endian 32-bit write of a portal register via the cache-inhibited map */
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}
 372 
 373 
/* Cache Enabled Portal Access: invalidate the cacheline at 'offset' */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}
 378 
/* Prefetch the cacheline at 'offset' for read-only access */
static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}
 383 
/* 32-bit big-endian read through the cache-enabled mapping */
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	/* offset is in bytes; ce_be is a __be32 pointer, hence /4 */
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
 388 
/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}
 403 
 404 
 405 static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
 406 {
 407         return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
 408 }
 409 
 410 
 411 static inline void eqcr_inc(struct qm_eqcr *eqcr)
 412 {
 413         
 414         struct qm_eqcr_entry *partial = eqcr->cursor + 1;
 415 
 416         eqcr->cursor = eqcr_carryclear(partial);
 417         if (partial != eqcr->cursor)
 418                 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
 419 }
 420 
/*
 * Initialise software EQCR state from the hardware indices and program the
 * portal CFG register with the requested stashing/production mode.
 * Always returns 0.
 */
static inline int qm_eqcr_init(struct qm_portal *portal,
				enum qm_eqcr_pmode pmode,
				unsigned int eq_stash_thresh,
				int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	/* the QM_EQCR_SIZE bit of the PI register tracks the current vbit */
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	/* one slot is always kept unusable to distinguish full from empty */
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
 451 
/* Read back the EQCR CI stashing threshold (CFG bits 28-30) */
static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}
 456 
/*
 * Sanity-check that software and hardware EQCR state agree before teardown;
 * complains loudly (but proceeds) on any mismatch.
 */
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
 471 
/*
 * Begin an enqueue command using only the software-cached 'available' count
 * (no hardware CI re-read). Returns the zeroed cursor entry for the caller
 * to fill in, or NULL if the ring is full.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
 487 
/*
 * As qm_eqcr_start_no_stash(), but when the cached count says "full" it
 * refreshes CI from the (stashed) cache-enabled register before giving up.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
 510 
/* Debug assertions shared by the EQCR commit paths */
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}
 517 
/*
 * Commit the in-progress enqueue using producer-valid-bit signalling:
 * the entry body must be globally visible before the verb (with vbit) is
 * written, hence the dma_wmb() ordering below. The ordering here is
 * load-bearing; do not reorder these statements.
 */
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	/* hardware consumes the entry once it sees the matching valid bit */
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
 535 
/* Prefetch the cache-enabled EQCR CI record ahead of qm_eqcr_cce_update() */
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}
 540 
/*
 * Refresh the consumer index from the cache-enabled CI record and credit
 * 'available' accordingly. Returns how many entries hardware consumed
 * since the last update.
 */
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	/* invalidate so the next read fetches fresh data from hardware */
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}
 552 
/* Program the EQCR interrupt threshold (also mirrored in software state) */
static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}
 560 
 561 static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
 562 {
 563         struct qm_eqcr *eqcr = &portal->eqcr;
 564 
 565         return eqcr->available;
 566 }
 567 
 568 static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
 569 {
 570         struct qm_eqcr *eqcr = &portal->eqcr;
 571 
 572         return QM_EQCR_SIZE - 1 - eqcr->available;
 573 }
 574 
 575 
 576 
/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

/* Bit-wise logic to wrap a DQRR ring pointer by clearing the "carry bit" */
static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}
 589 
 590 static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
 591 {
 592         return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
 593 }
 594 
 595 static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
 596 {
 597         return dqrr_carryclear(e + 1);
 598 }
 599 
/* Program the DQRR max-fill level into CFG bits 20-23 */
static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
}
 605 
/*
 * Initialise software DQRR state from hardware and program the dequeue
 * mode/consumption mode/max-fill into the CFG register. Any pending
 * dequeue commands are cancelled first. Always returns 0.
 */
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	/* the QM_DQRR_SIZE bit of the PI register tracks the current vbit */
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before use ('cfg' reused as counter) */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) |                   /* DP */
		((cmode & 3) << 16) |                   /* DCM */
		0xa0 |                                  /* RE+SE - TODO confirm against block guide */
		(0 ? 0x40 : 0) |                        /* Ignore RP */
		(0 ? 0x10 : 0);                         /* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
 646 
/* Teardown sanity check: warn if unconsumed DQRR entries would be dropped */
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	/* in CDC mode hardware tracks consumption itself, so CI may differ */
	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
 657 
 658 static inline const struct qm_dqrr_entry *qm_dqrr_current(
 659                                                 struct qm_portal *portal)
 660 {
 661         struct qm_dqrr *dqrr = &portal->dqrr;
 662 
 663         if (!dqrr->fill)
 664                 return NULL;
 665         return dqrr->cursor;
 666 }
 667 
 668 static inline u8 qm_dqrr_next(struct qm_portal *portal)
 669 {
 670         struct qm_dqrr *dqrr = &portal->dqrr;
 671 
 672         DPAA_ASSERT(dqrr->fill);
 673         dqrr->cursor = dqrr_inc(dqrr->cursor);
 674         return --dqrr->fill;
 675 }
 676 
/*
 * Producer-valid-bit mode: check whether hardware has produced a new entry
 * at the software PI, and if so account for it (advancing PI and toggling
 * the expected vbit on wrap).
 */
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 * (NOTE(review): wording reconstructed — confirm against the
	 * upstream driver/PAMU documentation)
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}
 697 
/*
 * Discrete Consumption Acknowledgment: consume (and optionally park) a
 * single DQRR entry, identified by pointer, via the DCAP register.
 */
static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					const struct qm_dqrr_entry *dq,
					int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |              /* DQRR_DCAP::PK */
	       idx);                                /* DQRR_DCAP::DCAP_CI */
}
 712 
/* DCA mode: consume multiple DQRR entries at once, given as an index bitmask */
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
	       (bitmask << 16));                    /* DQRR_DCAP::DCAP_CI */
}
 721 
/* Program the Static Dequeue Command Register */
static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}
 726 
/* Program the Volatile Dequeue Command Register */
static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}
 731 
/*
 * Program the DQRR interrupt threshold.
 * Returns 0 on success, -EINVAL if 'ithresh' exceeds QMAN_DQRR_IT_MAX.
 */
static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{

	if (ithresh > QMAN_DQRR_IT_MAX)
		return -EINVAL;

	qm_out(portal, QM_REG_DQRR_ITR, ithresh);

	return 0;
}
 742 
 743 
 744 
/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

/* Bit-wise logic to wrap an MR ring pointer by clearing the "carry bit" */
static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}
 756 
 757 static inline int mr_ptr2idx(const union qm_mr_entry *e)
 758 {
 759         return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
 760 }
 761 
 762 static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
 763 {
 764         return mr_carryclear(e + 1);
 765 }
 766 
/*
 * Initialise software MR state from the hardware indices and program the
 * consumption mode into CFG bit 8. Always returns 0.
 */
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	/* the QM_MR_SIZE bit of the PI register tracks the current vbit */
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);       /* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
 790 
/* Teardown sanity check: warn if unconsumed MR entries would be dropped */
static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}
 798 
 799 static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
 800 {
 801         struct qm_mr *mr = &portal->mr;
 802 
 803         if (!mr->fill)
 804                 return NULL;
 805         return mr->cursor;
 806 }
 807 
 808 static inline int qm_mr_next(struct qm_portal *portal)
 809 {
 810         struct qm_mr *mr = &portal->mr;
 811 
 812         DPAA_ASSERT(mr->fill);
 813         mr->cursor = mr_inc(mr->cursor);
 814         return --mr->fill;
 815 }
 816 
/*
 * Producer-valid-bit mode: account for a newly produced MR entry at the
 * software PI (if any), then invalidate/prefetch the next slot to poll.
 */
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}
 833 
/* CI-register consumption: acknowledge 'num' MR entries to hardware */
static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}
 842 
/* CI-register consumption: acknowledge everything up to the cursor */
static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}
 851 
/* Program the MR interrupt threshold */
static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
 856 
 857 
 858 
/* --- Management command API --- */

/*
 * Initialise the MC interface. The next result may land in either RR0 or
 * RR1; the verb bytes of the two result registers are inspected to work
 * out which one hardware will use next, and what the matching valid bit is.
 * Always returns 0.
 */
static inline int qm_mc_init(struct qm_portal *portal)
{
	u8 rr0, rr1;
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/*
	 * The expected valid bit in the next RR depends on what the last
	 * completed result was: if both verbs are zero (fresh portal) or
	 * RR0 holds a completed result, RR1 is next; otherwise RR0 is.
	 * NOTE(review): reasoning reconstructed from the code — confirm
	 * against the QMan block guide.
	 */
	rr0 = mc->rr->verb;
	rr1 = (mc->rr+1)->verb;
	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
		mc->rridx = 1;
	else
		mc->rridx = 0;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}
 885 
/* Teardown check: the MC interface must be idle (no command in flight) */
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
 896 
/* Begin a management command: returns the zeroed command record to fill in */
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
 908 
/*
 * Submit the prepared management command. The command body must be visible
 * before the verb (with valid bit) is written — hence the dma_wmb(). The
 * expected result register is prefetched for the subsequent poll.
 */
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}
 923 
/*
 * Poll for the response to the in-flight MC command. Returns NULL if the
 * response has not arrived yet, otherwise a pointer to it; on success the
 * response-register index and valid-bit polarity are flipped for the next
 * command.
 */
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero
	 * until its command is submitted and completed — this includes the
	 * valid bit — so a zero verb means "not ready yet".
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}
 946 
 947 static inline int qm_mc_result_timeout(struct qm_portal *portal,
 948                                        union qm_mc_result **mcr)
 949 {
 950         int timeout = QM_MCR_TIMEOUT;
 951 
 952         do {
 953                 *mcr = qm_mc_result(portal);
 954                 if (*mcr)
 955                         break;
 956                 udelay(1);
 957         } while (--timeout);
 958 
 959         return timeout;
 960 }
 961 
 962 static inline void fq_set(struct qman_fq *fq, u32 mask)
 963 {
 964         fq->flags |= mask;
 965 }
 966 
 967 static inline void fq_clear(struct qman_fq *fq, u32 mask)
 968 {
 969         fq->flags &= ~mask;
 970 }
 971 
 972 static inline int fq_isset(struct qman_fq *fq, u32 mask)
 973 {
 974         return fq->flags & mask;
 975 }
 976 
 977 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
 978 {
 979         return !(fq->flags & mask);
 980 }
 981 
struct qman_portal {
	struct qm_portal p;
	/* portal-state bits — dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only one volatile dequeue may be in flight at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probe-time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array: cgrs[0] is the tracked mask, cgrs[1] the last snapshot */
	struct qman_cgrs *cgrs;
	/* linked list of CSCN (congestion) handlers */
	struct list_head cgr_cbs;
	/* protects cgr_cbs and the cgrs snapshot */
	spinlock_t cgr_lock;
	/* deferred work for congestion and message-ring servicing */
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};
1004 
/* Per-CPU affine-portal bookkeeping, guarded by affine_mask_lock */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];
1010 
1011 static inline struct qman_portal *get_affine_portal(void)
1012 {
1013         return &get_cpu_var(qman_affine_portal);
1014 }
1015 
/* Release the per-CPU portal reference taken by get_affine_portal(). */
static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}
1020 
1021 
1022 static inline struct qman_portal *get_portal_for_channel(u16 channel)
1023 {
1024         int i;
1025 
1026         for (i = 0; i < num_possible_cpus(); i++) {
1027                 if (affine_portals[i] &&
1028                     affine_portals[i]->config->channel == channel)
1029                         return affine_portals[i];
1030         }
1031 
1032         return NULL;
1033 }
1034 
1035 static struct workqueue_struct *qm_portal_wq;
1036 
1037 int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
1038 {
1039         int res;
1040 
1041         if (!portal)
1042                 return -EINVAL;
1043 
1044         res = qm_dqrr_set_ithresh(&portal->p, ithresh);
1045         if (res)
1046                 return res;
1047 
1048         portal->p.dqrr.ithresh = ithresh;
1049 
1050         return 0;
1051 }
1052 EXPORT_SYMBOL(qman_dqrr_set_ithresh);
1053 
1054 void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
1055 {
1056         if (portal && ithresh)
1057                 *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
1058 }
1059 EXPORT_SYMBOL(qman_dqrr_get_ithresh);
1060 
1061 void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
1062 {
1063         if (portal && iperiod)
1064                 *iperiod = qm_in(&portal->p, QM_REG_ITPR);
1065 }
1066 EXPORT_SYMBOL(qman_portal_get_iperiod);
1067 
1068 int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
1069 {
1070         if (!portal || iperiod > QMAN_ITP_MAX)
1071                 return -EINVAL;
1072 
1073         qm_out(&portal->p, QM_REG_ITPR, iperiod);
1074 
1075         return 0;
1076 }
1077 EXPORT_SYMBOL(qman_portal_set_iperiod);
1078 
1079 int qman_wq_alloc(void)
1080 {
1081         qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
1082         if (!qm_portal_wq)
1083                 return -ENOMEM;
1084         return 0;
1085 }
1086 
1087 
1088 void qman_enable_irqs(void)
1089 {
1090         int i;
1091 
1092         for (i = 0; i < num_possible_cpus(); i++) {
1093                 if (affine_portals[i]) {
1094                         qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
1095                         qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
1096                 }
1097 
1098         }
1099 }
1100 
1101 
1102 
1103 
1104 
/* Waiters on FQ/VDQCR state transitions (see clear_vdqcr(), __poll_portal_slow()) */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

/* FQ lookup table: two slots per FQID (see qman_create_fq() / fqid_to_fq()) */
static struct qman_fq **fq_table;
static u32 num_fqids;
1109 
1110 int qman_alloc_fq_table(u32 _num_fqids)
1111 {
1112         num_fqids = _num_fqids;
1113 
1114         fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
1115                                        num_fqids, 2));
1116         if (!fq_table)
1117                 return -ENOMEM;
1118 
1119         pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1120                  fq_table, num_fqids * 2);
1121         return 0;
1122 }
1123 
1124 static struct qman_fq *idx_to_fq(u32 idx)
1125 {
1126         struct qman_fq *fq;
1127 
1128 #ifdef CONFIG_FSL_DPAA_CHECKING
1129         if (WARN_ON(idx >= num_fqids * 2))
1130                 return NULL;
1131 #endif
1132         fq = fq_table[idx];
1133         DPAA_ASSERT(!fq || idx == fq->idx);
1134 
1135         return fq;
1136 }
1137 
1138 
1139 
1140 
1141 
/*
 * Map a FQID to its lookup-table entry. The table holds two slots per FQID
 * (qman_create_fq() uses idx = fqid * 2, +1 for NO_MODIFY FQs); this helper
 * returns the even slot only.
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}
1146 
/*
 * Recover an FQ from the 32-bit tag carried in DQRR/MR entries: on 64-bit
 * a pointer does not fit in 32 bits, so the tag is a table index; on
 * 32-bit the tag is the pointer itself (see fq_to_tag()).
 */
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}
1155 
/* Inverse of tag_to_fq(): encode an FQ as the 32-bit context_b/ERN tag. */
static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
1164 
1165 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1166 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1167                                         unsigned int poll_limit);
1168 static void qm_congestion_task(struct work_struct *work);
1169 static void qm_mr_process_task(struct work_struct *work);
1170 
/*
 * Portal interrupt handler: DQRR (fast-path) work is serviced inline,
 * everything else is dispatched through __poll_portal_slow(); the serviced
 * sources are then acknowledged in ISR.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
	u32 clear = 0;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI) {
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
	}
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
1190 
1191 static int drain_mr_fqrni(struct qm_portal *p)
1192 {
1193         const union qm_mr_entry *msg;
1194 loop:
1195         qm_mr_pvb_update(p);
1196         msg = qm_mr_current(p);
1197         if (!msg) {
1198                 
1199 
1200 
1201 
1202 
1203 
1204 
1205 
1206 
1207 
1208 
1209 
1210 
1211 
1212                 mdelay(1);
1213                 qm_mr_pvb_update(p);
1214                 msg = qm_mr_current(p);
1215                 if (!msg)
1216                         return 0;
1217         }
1218         if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1219                 
1220                 pr_err("Found verb 0x%x in MR\n", msg->verb);
1221                 return -1;
1222         }
1223         qm_mr_next(p);
1224         qm_mr_cci_consume(p, 1);
1225         goto loop;
1226 }
1227 
/*
 * Bring up one software portal: initialise the EQCR/DQRR/MR/MC rings,
 * allocate congestion-tracking state, register the IRQ, verify the rings
 * are clean, and finally enable dequeues. On failure the already-initialised
 * pieces are unwound via the goto ladder and -EIO is returned.
 */
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config; everything that follows depends on it
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI stashing is used, the stash threshold is 3; otherwise 0.
	 * The final argument enables the PVB commit mode.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot (cgrs[1]) is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	/* disable all sources until the rings are verified clean */
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	qm_out(p, QM_REG_IIR, 1);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		/* leftover DQRR entries are discarded wholesale */
		dev_dbg(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* anything other than FQRNIs in the MR means trouble */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISR, 0xffffffff);
	qm_out(p, QM_REG_ISDR, 0);
	if (!qman_requires_cleanup())
		qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR to start dequeuing */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}
1359 
1360 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1361                                               const struct qman_cgrs *cgrs)
1362 {
1363         struct qman_portal *portal;
1364         int err;
1365 
1366         portal = &per_cpu(qman_affine_portal, c->cpu);
1367         err = qman_create_portal(portal, c, cgrs);
1368         if (err)
1369                 return NULL;
1370 
1371         spin_lock(&affine_mask_lock);
1372         cpumask_set_cpu(c->cpu, &affine_mask);
1373         affine_channels[c->cpu] = c->channel;
1374         affine_portals[c->cpu] = portal;
1375         spin_unlock(&affine_mask_lock);
1376 
1377         return portal;
1378 }
1379 
/*
 * Tear down a portal: stop dequeues, quiesce the EQCR, release the IRQ and
 * the rings. The ring finish order is the reverse of qman_create_portal().
 */
static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the
	 * enqueue path, this update will only invalidate the CI cacheline
	 * *after* working on it, so we need to call this twice to ensure a
	 * full update irrespective of where the enqueue processing was at
	 * when the teardown began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	/* portal is no longer usable */
	qm->config = NULL;
}
1410 
/*
 * Destroy the current CPU's affine portal and remove it from the affine
 * mask. Returns the (now unused) portal config so the caller can release
 * its resources.
 */
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	/* capture config/cpu before qman_destroy_portal() clears qm->config */
	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}
1428 
1429 
1430 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1431                                    const union qm_mr_entry *msg, u8 verb)
1432 {
1433         switch (verb) {
1434         case QM_MR_VERB_FQRL:
1435                 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1436                 fq_clear(fq, QMAN_FQ_STATE_ORL);
1437                 break;
1438         case QM_MR_VERB_FQRN:
1439                 DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1440                             fq->state == qman_fq_state_sched);
1441                 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1442                 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1443                 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1444                         fq_set(fq, QMAN_FQ_STATE_NE);
1445                 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1446                         fq_set(fq, QMAN_FQ_STATE_ORL);
1447                 fq->state = qman_fq_state_retired;
1448                 break;
1449         case QM_MR_VERB_FQPN:
1450                 DPAA_ASSERT(fq->state == qman_fq_state_sched);
1451                 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1452                 fq->state = qman_fq_state_parked;
1453         }
1454 }
1455 
/*
 * Deferred congestion-state servicing, scheduled from __poll_portal_slow()
 * when QM_PIRQ_CSCI fires (with the source disabled). Queries the global
 * congestion state, diffs it against the last snapshot and invokes the
 * registered CGR callbacks for every group whose state changed, then
 * re-enables the CSCI source.
 */
static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the CGRs this portal isn't tracking */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta (congestion entry/exit) */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update the snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* invoke the callbacks for the changed groups */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
1487 
/*
 * Deferred message-ring servicing, scheduled from __poll_portal_slow() when
 * QM_PIRQ_MRI fires (with the source disabled). Handles FQ state-change
 * notifications and enqueue rejections (ERNs), then consumes the processed
 * entries and re-enables the MRI source.
 */
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked: context_b carries the FQ tag */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN - not handled, just reported once */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	/* consume everything processed in one go, then re-enable MRI */
	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}
1548 
/*
 * Service the slow-path interrupt sources. Congestion (CSCI) and message
 * ring (MRI) servicing is deferred to the portal workqueue with the source
 * disabled until the work re-enables it; EQCR threshold (EQRI) is handled
 * inline. Returns the source bits that were serviced.
 */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
1571 
1572 
1573 
1574 
1575 
/*
 * Release VDQCR ownership and wake any waiters; kept out-of-line
 * (noinline) to keep the __poll_portal_fast() fast path small.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
1582 
1583 
1584 
1585 
1586 
1587 
1588 
1589 
1590 
1591 
1592 
1593 
1594 
1595 
1596 
1597 
1598 
1599 
1600 
1601 
1602 
1603 
1604 
1605 
1606 
1607 
1608 
/*
 * DQRR fast-path processing: walk up to @poll_limit dequeue entries,
 * dispatch each to the owning FQ's dqrr callback, and consume/park/defer
 * per the callback's verdict. Returns the number of entries processed.
 * Volatile (VDQCR) entries are routed via p->vdqcr_owned rather than
 * context_b, since a retired FQ being drained may not carry a usable tag.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption; use the
			 * portal's recorded VDQCR owner instead.
			 */
			fq = p->vdqcr_owned;
			/*
			 * The NE flag only needs clearing on the volatile
			 * path — one less check on the SDQCR fast path.
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * Duplicated from the SDQCR branch because there is
			 * work both before and after the callback here.
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b carries the FQ tag */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing.
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/*
		 * Parking isn't possible unless HELDACTIVE was set
		 * (FORCEELIGIBLE implies HELDACTIVE, so checking HELDACTIVE
		 * covers both).
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* "defer" just means "skip it, I'll consume it myself later" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Loop back unless the callback asked to stop after
		 * consuming, or we hit the processing limit.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
1686 
1687 void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1688 {
1689         unsigned long irqflags;
1690 
1691         local_irq_save(irqflags);
1692         p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1693         qm_out(&p->p, QM_REG_IER, p->irq_sources);
1694         local_irq_restore(irqflags);
1695 }
1696 EXPORT_SYMBOL(qman_p_irqsource_add);
1697 
/*
 * Disable the given interrupt source bits on the portal.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits
	 * that are in p->irq_sources. As we're trimming that mask, if one of
	 * them were to assert in the status register just before we remove it
	 * from the enable mask, there would be an interrupt storm when we
	 * release the IRQ lock. So we wait for the enable-mask update to take
	 * effect in h/w (by reading it back) and then clear all other bits in
	 * the status register, i.e. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data dependency on the read-back, so the IER write has definitely
	 * taken effect before ISR is cleared.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
1726 
/* Return the mask of CPUs that currently have an affine portal. */
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
1732 
/*
 * Return the dedicated channel of CPU @cpu's affine portal; cpu < 0 means
 * "the current CPU" (resolved via this CPU's affine portal). Warns if the
 * CPU has no affine portal.
 */
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);
1745 
/* Return CPU @cpu's affine portal (NULL if none has been created). */
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);
1751 
/* Poll up to @limit DQRR entries on @p; returns the number processed. */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
1757 
1758 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1759 {
1760         unsigned long irqflags;
1761 
1762         local_irq_save(irqflags);
1763         pools &= p->config->pools;
1764         p->sdqcr |= pools;
1765         qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1766         local_irq_restore(irqflags);
1767 }
1768 EXPORT_SYMBOL(qman_p_static_dequeue_add);
1769 
1770 
1771 
1772 static const char *mcr_result_str(u8 result)
1773 {
1774         switch (result) {
1775         case QM_MCR_RESULT_NULL:
1776                 return "QM_MCR_RESULT_NULL";
1777         case QM_MCR_RESULT_OK:
1778                 return "QM_MCR_RESULT_OK";
1779         case QM_MCR_RESULT_ERR_FQID:
1780                 return "QM_MCR_RESULT_ERR_FQID";
1781         case QM_MCR_RESULT_ERR_FQSTATE:
1782                 return "QM_MCR_RESULT_ERR_FQSTATE";
1783         case QM_MCR_RESULT_ERR_NOTEMPTY:
1784                 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1785         case QM_MCR_RESULT_PENDING:
1786                 return "QM_MCR_RESULT_PENDING";
1787         case QM_MCR_RESULT_ERR_BADCOMMAND:
1788                 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1789         }
1790         return "<unknown MCR result>";
1791 }
1792 
1793 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1794 {
1795         if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1796                 int ret = qman_alloc_fqid(&fqid);
1797 
1798                 if (ret)
1799                         return ret;
1800         }
1801         fq->fqid = fqid;
1802         fq->flags = flags;
1803         fq->state = qman_fq_state_oos;
1804         fq->cgr_groupid = 0;
1805 
1806         
1807         if (fqid == 0 || fqid >= num_fqids) {
1808                 WARN(1, "bad fqid %d\n", fqid);
1809                 return -EINVAL;
1810         }
1811 
1812         fq->idx = fqid * 2;
1813         if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1814                 fq->idx++;
1815 
1816         WARN_ON(fq_table[fq->idx]);
1817         fq_table[fq->idx] = fq;
1818 
1819         return 0;
1820 }
1821 EXPORT_SYMBOL(qman_create_fq);
1822 
/*
 * Unregister an FQ. The FQ must already be quiesced (parked or OOS); a
 * dynamically allocated FQID is returned to the allocator. Calling this on
 * an FQ in any other state trips the assertion below.
 */
void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ
	 * be quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);
1844 
/* Return the FQID recorded in the FQ object by qman_create_fq(). */
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
1850 
/*
 * Issue an INITFQ_[PARKED|SCHED] management command for @fq, applying the
 * optional caller-supplied FQD fields in @opts. The FQ must be OOS or
 * parked and not mid-transition. Returns 0, or a negative errno on
 * validation failure, MC timeout or a non-OK MC result.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as
	 * the demux tag (see tag_to_fq()); otherwise the caller-provided
	 * value is left alone.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * If the caller wasn't setting CONTEXTA, clear the stashing
		 * settings; otherwise program the FQ's DMA address for
		 * stashing.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
				  QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
				sizeof(mcc->initfq.fqd.context_a));
		} else {
			/* NB: shadows the outer 'p' for the DMA-capable portal */
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		/* default to work queue 4 unless the caller chose a DESTWQ */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
				  QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	/* mirror the accepted options into the local FQ state */
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
1963 
/*
 * qman_schedule_fq - move a parked FQ into the scheduled state.
 * @fq: the frame queue, which must currently be parked.
 *
 * Issues the ALTER_SCHED management command through the affine portal.
 * Returns 0 on success, -EINVAL on bad initial state, -EBUSY if the FQ is
 * mid-transition, -ETIMEDOUT if the management command never completed,
 * or -EIO on a hardware-reported error.
 */
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue the ALTERFQ_SCHED management command */
	p = get_affine_portal();
	/* Re-check state now that we are on the (affine) portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
2004 
/*
 * qman_retire_fq - take a parked/scheduled FQ out of service.
 * @fq: the frame queue to retire.
 * @flags: if non-NULL, receives the FQ's flag state after the command.
 *
 * Returns 0 if retirement completed immediately, 1 if retirement is
 * pending (completion is signalled later via an FQRN message and the
 * QMAN_FQ_STATE_CHANGING flag), -EINVAL/-EBUSY on bad state, and
 * -ETIMEDOUT or -EIO on command failure.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * When the command result is OK we complete the retirement right
	 * here rather than deferring to the FQRNI message: the flags and
	 * state are updated inline and any FQRNI that arrives later for
	 * this FQ is ignored (see drain_mr_fqrni()). Only the PENDING
	 * result leaves the transition to be finished by the FQRN handler.
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Because we handle "immediate" retirement here and
			 * drop the real FQRNI (by the time it is seen the fq
			 * may already have been OOS'd and freed), we fake an
			 * MR entry that looks like an FQRNI so the upper
			 * layer's 'fqs' callback fires either way.
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
2086 
/*
 * qman_oos_fq - take a retired FQ out-of-service.
 * @fq: the frame queue, which must currently be retired.
 *
 * Issues the ALTER_OOS management command. Returns 0 on success,
 * -EINVAL/-EBUSY on bad state, -ETIMEDOUT on MC timeout, or -EIO on a
 * hardware-reported error.
 */
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* Re-check: OOS may be blocked, or the state may have moved on */
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
2124 
/*
 * qman_query_fq - read back the frame queue descriptor for @fq.
 * @fq: the frame queue to query.
 * @fqd: filled with the hardware FQ descriptor on success.
 *
 * Returns 0 on success, -ETIMEDOUT on MC timeout, -EIO on failure.
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
2149 
/*
 * qman_query_fq_np - query the non-programmable fields of @fq.
 * @fq: the frame queue to query.
 * @np: filled with the non-programmable state (frame counts, state, ...).
 *
 * Returns 0 on success, -ERANGE if the FQID does not exist on this SoC,
 * -ETIMEDOUT on MC timeout, -EIO on any other failure.
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);
2177 
/*
 * qman_query_cgr - read the hardware state of congestion group @cgr.
 * @cgr: identifies the congestion group (only cgrid is used).
 * @cgrd: filled with the QUERYCGR result on success.
 *
 * Returns 0 on success, -ETIMEDOUT on MC timeout, -EIO on failure.
 * NOTE(review): the verb assert compares against the MCC (command)
 * constant — presumably the MCR encoding is identical; confirm against
 * the register definitions.
 */
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
2205 
2206 int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2207 {
2208         struct qm_mcr_querycgr query_cgr;
2209         int err;
2210 
2211         err = qman_query_cgr(cgr, &query_cgr);
2212         if (err)
2213                 return err;
2214 
2215         *result = !!query_cgr.cgr.cs;
2216         return 0;
2217 }
2218 EXPORT_SYMBOL(qman_query_cgr_congested);
2219 
2220 
/*
 * Try to claim portal @p's (single) volatile-dequeue facility for @fq and
 * program the VDQCR register. Ownership check, flag set and register write
 * must be atomic with respect to this CPU, hence the local_irq_save()
 * critical section. Returns 0 on success, -EBUSY if the portal or the FQ
 * already has a volatile dequeue in flight.
 */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}
2240 
2241 static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2242 {
2243         int ret;
2244 
2245         *p = get_affine_portal();
2246         ret = set_p_vdqcr(*p, fq, vdqcr);
2247         put_affine_portal();
2248         return ret;
2249 }
2250 
2251 static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2252                                 u32 vdqcr, u32 flags)
2253 {
2254         int ret = 0;
2255 
2256         if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2257                 ret = wait_event_interruptible(affine_queue,
2258                                 !set_vdqcr(p, fq, vdqcr));
2259         else
2260                 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2261         return ret;
2262 }
2263 
/*
 * qman_volatile_dequeue - issue a volatile dequeue command for @fq.
 * @fq: must be parked or retired, with no volatile dequeue outstanding.
 * @flags: QMAN_VOLATILE_FLAG_* controlling wait behaviour.
 * @vdqcr: VDQCR options; must not contain an FQID (fq->fqid is inserted).
 *
 * Returns 0 on success, -EINVAL/-EBUSY on bad state, or the error from
 * claiming the portal's VDQCR facility.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set; optionally wait for the command to complete */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
2301 
2302 static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2303 {
2304         if (avail)
2305                 qm_eqcr_cce_prefetch(&p->p);
2306         else
2307                 qm_eqcr_cce_update(&p->p);
2308 }
2309 
2310 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
2311 {
2312         struct qman_portal *p;
2313         struct qm_eqcr_entry *eq;
2314         unsigned long irqflags;
2315         u8 avail;
2316 
2317         p = get_affine_portal();
2318         local_irq_save(irqflags);
2319 
2320         if (p->use_eqcr_ci_stashing) {
2321                 
2322 
2323 
2324 
2325                 eq = qm_eqcr_start_stash(&p->p);
2326         } else {
2327                 
2328 
2329 
2330 
2331                 avail = qm_eqcr_get_avail(&p->p);
2332                 if (avail < 2)
2333                         update_eqcr_ci(p, avail);
2334                 eq = qm_eqcr_start_no_stash(&p->p);
2335         }
2336 
2337         if (unlikely(!eq))
2338                 goto out;
2339 
2340         qm_fqid_set(eq, fq->fqid);
2341         eq->tag = cpu_to_be32(fq_to_tag(fq));
2342         eq->fd = *fd;
2343 
2344         qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
2345 out:
2346         local_irq_restore(irqflags);
2347         put_affine_portal();
2348         return 0;
2349 }
2350 EXPORT_SYMBOL(qman_enqueue);
2351 
/*
 * qm_modify_cgr - issue MODIFYCGR (or INITCGR) for @cgr.
 * @cgr: the congestion group; only cgrid is consumed here.
 * @flags: QMAN_CGR_FLAG_USE_INIT selects the INITCGR verb instead.
 * @opts: optional field write-enable mask plus values; may be NULL.
 *
 * Returns 0 on success, -ETIMEDOUT on MC timeout, -EIO on h/w error.
 */
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	/* the cgid must be set after copying opts, which may clobber it */
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}
2381 
/* Software-portal index of portal 'n' (used as the CSCN target number) */
#define PORTAL_IDX(n)   (n->config->channel - QM_CHANNEL_SWPORTAL0)
2383 
2384 
2385 static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
2386 {
2387         if (qman_ip_rev >= QMAN_REV30)
2388                 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
2389                                         QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
2390         else
2391                 cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
2392 }
2393 
2394 static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
2395 {
2396         if (qman_ip_rev >= QMAN_REV30)
2397                 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
2398         else
2399                 cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
2400 }
2401 
/* CPU each CGR was created on; deletions are routed back to that CPU */
static u8 qman_cgr_cpus[CGR_NUM];
2403 
2404 void qman_init_cgr_all(void)
2405 {
2406         struct qman_cgr cgr;
2407         int err_cnt = 0;
2408 
2409         for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2410                 if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2411                         err_cnt++;
2412         }
2413 
2414         if (err_cnt)
2415                 pr_err("Warning: %d error%s while initialising CGR h/w\n",
2416                        err_cnt, (err_cnt > 1) ? "s" : "");
2417 }
2418 
2419 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2420                     struct qm_mcc_initcgr *opts)
2421 {
2422         struct qm_mcr_querycgr cgr_state;
2423         int ret;
2424         struct qman_portal *p;
2425 
2426         
2427 
2428 
2429 
2430 
2431 
2432         if (cgr->cgrid >= CGR_NUM)
2433                 return -EINVAL;
2434 
2435         preempt_disable();
2436         p = get_affine_portal();
2437         qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2438         preempt_enable();
2439 
2440         cgr->chan = p->config->channel;
2441         spin_lock(&p->cgr_lock);
2442 
2443         if (opts) {
2444                 struct qm_mcc_initcgr local_opts = *opts;
2445 
2446                 ret = qman_query_cgr(cgr, &cgr_state);
2447                 if (ret)
2448                         goto out;
2449 
2450                 qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
2451                                      be32_to_cpu(cgr_state.cgr.cscn_targ));
2452                 local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2453 
2454                 
2455                 if (flags & QMAN_CGR_FLAG_USE_INIT)
2456                         ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2457                                             &local_opts);
2458                 else
2459                         ret = qm_modify_cgr(cgr, 0, &local_opts);
2460                 if (ret)
2461                         goto out;
2462         }
2463 
2464         list_add(&cgr->node, &p->cgr_cbs);
2465 
2466         
2467         ret = qman_query_cgr(cgr, &cgr_state);
2468         if (ret) {
2469                 
2470                 dev_err(p->config->dev, "CGR HW state partially modified\n");
2471                 ret = 0;
2472                 goto out;
2473         }
2474         if (cgr->cb && cgr_state.cgr.cscn_en &&
2475             qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2476                 cgr->cb(p, cgr, 1);
2477 out:
2478         spin_unlock(&p->cgr_lock);
2479         put_affine_portal();
2480         return ret;
2481 }
2482 EXPORT_SYMBOL(qman_create_cgr);
2483 
/*
 * qman_delete_cgr - deregister @cgr from the portal it was created on.
 *
 * Must run on the CPU/portal that created the CGR (callers who may be on
 * another CPU should use qman_delete_cgr_safe()). Removes this portal
 * from the CGR's CSCN targets only when no other registered object with a
 * callback shares the same CGRID. On failure to update h/w, the node is
 * re-added to the callback list and the error is returned.
 */
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from a portal other than the creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects with this CGRID left on the
	 * list, update CSCN_TARG to drop this portal
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)  {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
2534 
/*
 * Pairs a CGR with a completion so a deletion request can be handed to
 * another context and waited on. NOTE(review): not referenced in this
 * part of the file — confirm it is still used before removing.
 */
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
2539 
/* smp_call_function_single() trampoline for qman_delete_cgr(). */
static void qman_delete_cgr_smp_call(void *p)
{
	struct qman_cgr *cgr = p;

	qman_delete_cgr(cgr);
}
2544 
2545 void qman_delete_cgr_safe(struct qman_cgr *cgr)
2546 {
2547         preempt_disable();
2548         if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
2549                 smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
2550                                          qman_delete_cgr_smp_call, cgr, true);
2551                 preempt_enable();
2552                 return;
2553         }
2554 
2555         qman_delete_cgr(cgr);
2556         preempt_enable();
2557 }
2558 EXPORT_SYMBOL(qman_delete_cgr_safe);
2559 
2560 
2561 
/*
 * Consume every entry currently in the portal's message ring, returning
 * non-zero if any entry's verb type matched @v. Used while shutting down
 * FQs to wait for (and discard) FQRN/FQRL messages.
 */
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		/* advance past the entry, then ack everything up to it */
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}
2579 
/*
 * Consume every entry currently in the portal's DQRR (busy-waiting for
 * the first entry when @wait is true), returning non-zero if any entry
 * was for FQID @fqid with one of the status bits in @s set. Entries are
 * acknowledged through the CDC (discrete consumption) mechanism.
 */
static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}
2603 
/* Drain the MR, reporting whether any entry matched verb type 'V' */
#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

/* Drain the DQRR, reporting whether FQID 'f' was seen with status 'S' */
#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

/* As qm_dqrr_drain(), but busy-wait until at least one entry appears */
#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

/* Drain the DQRR unconditionally, matching nothing */
#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)
2615 
/*
 * qman_shutdown_fq - forcibly take frame queue @fqid to the OOS state.
 *
 * Queries the FQ's current state and, depending on it, retires, drains
 * (via volatile dequeues on the FQ's own channel portal) and OOS'es the
 * queue so the FQID can be reused. Returns 0 on success or a negative
 * errno on failure.
 */
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p, *channel_portal;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need for any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	if (channel < qm_channel_pool1) {
		channel_portal = get_portal_for_channel(channel);
		if (channel_portal == NULL) {
			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
				channel);
			ret = -EIO;
			goto out;
		}
	} else
		channel_portal = p;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&channel_portal->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_OK)
			drain_mr_fqrni(&channel_portal->p);

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring,
			 * which will only occur once the FQ has been
			 * drained. For that, the portal must dequeue from
			 * the channel the FQ is scheduled on.
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain the FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1)<<4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&channel_portal->p);
				/* Process the message ring too */
				found_fqrn = qm_mr_drain(&channel_portal->p,
							 FQRN);
				cpu_relax();
			} while (!found_fqrn);
			/* Restore the portal's original SDQCR */
			qm_dqrr_sdqcr_set(&channel_portal->p,
					  channel_portal->sdqcr);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if the FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}
2826 
2827 const struct qm_portal_config *qman_get_qm_portal_config(
2828                                                 struct qman_portal *portal)
2829 {
2830         return portal->config;
2831 }
2832 EXPORT_SYMBOL(qman_get_qm_portal_config);
2833 
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2837 
2838 static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2839 {
2840         unsigned long addr;
2841 
2842         if (!p)
2843                 return -ENODEV;
2844 
2845         addr = gen_pool_alloc(p, cnt);
2846         if (!addr)
2847                 return -ENOMEM;
2848 
2849         *result = addr & ~DPAA_GENALLOC_OFF;
2850 
2851         return 0;
2852 }
2853 
/* Allocate a contiguous range of @count FQIDs; base returned in *@result. */
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
2859 
/* Allocate a contiguous range of @count pool channels; base in *@result. */
int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);
2865 
/* Allocate a contiguous range of @count CGR IDs; base returned in *@result. */
int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
2871 
2872 int qman_release_fqid(u32 fqid)
2873 {
2874         int ret = qman_shutdown_fq(fqid);
2875 
2876         if (ret) {
2877                 pr_debug("FQID %d leaked\n", fqid);
2878                 return ret;
2879         }
2880 
2881         gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2882         return 0;
2883 }
2884 EXPORT_SYMBOL(qman_release_fqid);
2885 
2886 static int qpool_cleanup(u32 qp)
2887 {
2888         
2889 
2890 
2891 
2892 
2893 
2894         struct qman_fq fq = {
2895                 .fqid = QM_FQID_RANGE_START
2896         };
2897         int err;
2898 
2899         do {
2900                 struct qm_mcr_queryfq_np np;
2901 
2902                 err = qman_query_fq_np(&fq, &np);
2903                 if (err == -ERANGE)
2904                         
2905                         return 0;
2906                 else if (WARN_ON(err))
2907                         return err;
2908 
2909                 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2910                         struct qm_fqd fqd;
2911 
2912                         err = qman_query_fq(&fq, &fqd);
2913                         if (WARN_ON(err))
2914                                 return err;
2915                         if (qm_fqd_get_chan(&fqd) == qp) {
2916                                 
2917                                 err = qman_shutdown_fq(fq.fqid);
2918                                 if (err)
2919                                         
2920 
2921 
2922 
2923                                         return err;
2924                         }
2925                 }
2926                 
2927                 fq.fqid++;
2928         } while (1);
2929 }
2930 
2931 int qman_release_pool(u32 qp)
2932 {
2933         int ret;
2934 
2935         ret = qpool_cleanup(qp);
2936         if (ret) {
2937                 pr_debug("CHID %d leaked\n", qp);
2938                 return ret;
2939         }
2940 
2941         gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
2942         return 0;
2943 }
2944 EXPORT_SYMBOL(qman_release_pool);
2945 
2946 static int cgr_cleanup(u32 cgrid)
2947 {
2948         
2949 
2950 
2951 
2952         struct qman_fq fq = {
2953                 .fqid = QM_FQID_RANGE_START
2954         };
2955         int err;
2956 
2957         do {
2958                 struct qm_mcr_queryfq_np np;
2959 
2960                 err = qman_query_fq_np(&fq, &np);
2961                 if (err == -ERANGE)
2962                         
2963                         return 0;
2964                 else if (WARN_ON(err))
2965                         return err;
2966 
2967                 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2968                         struct qm_fqd fqd;
2969 
2970                         err = qman_query_fq(&fq, &fqd);
2971                         if (WARN_ON(err))
2972                                 return err;
2973                         if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
2974                             fqd.cgid == cgrid) {
2975                                 pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
2976                                        cgrid, fq.fqid);
2977                                 return -EIO;
2978                         }
2979                 }
2980                 
2981                 fq.fqid++;
2982         } while (1);
2983 }
2984 
2985 int qman_release_cgrid(u32 cgrid)
2986 {
2987         int ret;
2988 
2989         ret = cgr_cleanup(cgrid);
2990         if (ret) {
2991                 pr_debug("CGRID %d leaked\n", cgrid);
2992                 return ret;
2993         }
2994 
2995         gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
2996         return 0;
2997 }
2998 EXPORT_SYMBOL(qman_release_cgrid);