This source file includes the following definitions:
- __bfa_trc
- bfa_com_port_attach
- bfa_com_ablk_attach
- bfa_com_cee_attach
- bfa_com_sfp_attach
- bfa_com_flash_attach
- bfa_com_diag_attach
- bfa_com_phy_attach
- bfa_com_fru_attach
- bfa_iocfc_sm_stopped_entry
- bfa_iocfc_sm_stopped
- bfa_iocfc_sm_initing_entry
- bfa_iocfc_sm_initing
- bfa_iocfc_sm_dconf_read_entry
- bfa_iocfc_sm_dconf_read
- bfa_iocfc_sm_init_cfg_wait_entry
- bfa_iocfc_sm_init_cfg_wait
- bfa_iocfc_sm_init_cfg_done_entry
- bfa_iocfc_sm_init_cfg_done
- bfa_iocfc_sm_operational_entry
- bfa_iocfc_sm_operational
- bfa_iocfc_sm_dconf_write_entry
- bfa_iocfc_sm_dconf_write
- bfa_iocfc_sm_stopping_entry
- bfa_iocfc_sm_stopping
- bfa_iocfc_sm_enabling_entry
- bfa_iocfc_sm_enabling
- bfa_iocfc_sm_cfg_wait_entry
- bfa_iocfc_sm_cfg_wait
- bfa_iocfc_sm_disabling_entry
- bfa_iocfc_sm_disabling
- bfa_iocfc_sm_disabled_entry
- bfa_iocfc_sm_disabled
- bfa_iocfc_sm_failed_entry
- bfa_iocfc_sm_failed
- bfa_iocfc_sm_init_failed_entry
- bfa_iocfc_sm_init_failed
- bfa_reqq_resume
- bfa_isr_rspq
- bfa_isr_reqq
- bfa_msix_all
- bfa_intx
- bfa_isr_enable
- bfa_isr_disable
- bfa_msix_reqq
- bfa_isr_unhandled
- bfa_msix_rspq
- bfa_msix_lpu_err
- bfa_iocfc_send_cfg
- bfa_iocfc_init_mem
- bfa_iocfc_mem_claim
- bfa_iocfc_start_submod
- bfa_iocfc_disable_submod
- bfa_iocfc_init_cb
- bfa_iocfc_stop_cb
- bfa_iocfc_enable_cb
- bfa_iocfc_disable_cb
- bfa_iocfc_qreg
- bfa_iocfc_res_recfg
- bfa_iocfc_cfgrsp
- bfa_iocfc_reset_queues
- bfa_iocfc_process_faa_addr
- bfa_faa_validate_request
- bfa_faa_query
- bfa_faa_query_reply
- bfa_iocfc_enable_cbfn
- bfa_iocfc_disable_cbfn
- bfa_iocfc_hbfail_cbfn
- bfa_iocfc_reset_cbfn
- bfa_iocfc_meminfo
- bfa_iocfc_attach
- bfa_iocfc_init
- bfa_iocfc_start
- bfa_iocfc_stop
- bfa_iocfc_isr
- bfa_iocfc_get_attr
- bfa_iocfc_israttr_set
- bfa_iocfc_set_snsbase
- bfa_iocfc_enable
- bfa_iocfc_disable
- bfa_iocfc_is_operational
- bfa_iocfc_get_bootwwns
- bfa_iocfc_get_pbc_vports
- bfa_cfg_get_meminfo
- bfa_attach
- bfa_detach
- bfa_comp_deq
- bfa_comp_process
- bfa_comp_free
- bfa_get_pciids
- bfa_cfg_get_default
- bfa_cfg_get_min
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 #include "bfad_drv.h"
  12 #include "bfa_modules.h"
  13 #include "bfi_reg.h"
  14 
  15 BFA_TRC_FILE(HAL, CORE);
  16 
  17 /*
  18  * Message handlers for various modules, indexed by BFI message class.
  19  */
  20 static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
  21         bfa_isr_unhandled,      
  22         bfa_isr_unhandled,      
  23         bfa_fcdiag_intr,        
  24         bfa_isr_unhandled,      
  25         bfa_isr_unhandled,      
  26         bfa_fcport_isr,         
  27         bfa_isr_unhandled,      
  28         bfa_isr_unhandled,      
  29         bfa_uf_isr,             
  30         bfa_fcxp_isr,           
  31         bfa_lps_isr,            
  32         bfa_rport_isr,          
  33         bfa_itn_isr,            
  34         bfa_isr_unhandled,      
  35         bfa_isr_unhandled,      
  36         bfa_isr_unhandled,      
  37         bfa_ioim_isr,           
  38         bfa_ioim_good_comp_isr, 
  39         bfa_tskim_isr,          
  40         bfa_isr_unhandled,      
  41         bfa_isr_unhandled,      
  42         bfa_isr_unhandled,      
  43         bfa_isr_unhandled,      
  44         bfa_isr_unhandled,      
  45         bfa_isr_unhandled,      
  46         bfa_isr_unhandled,      
  47         bfa_isr_unhandled,      
  48         bfa_isr_unhandled,      
  49         bfa_isr_unhandled,      
  50         bfa_isr_unhandled,      
  51         bfa_isr_unhandled,      
  52         bfa_isr_unhandled,      
  53 };
  54 
  55 
  56 /* Mailbox message handlers, indexed by BFI message class */
  57 static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
  58         NULL,
  59         NULL,           
  60         NULL,           
  61         NULL,           
  62         NULL,           
  63         NULL,           
  64         bfa_iocfc_isr,  
  65         NULL,
  66 };
  67 
  68 
  69 
  70 void
  71 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
  72 {
  73         int             tail = trcm->tail;
  74         struct bfa_trc_s        *trc = &trcm->trc[tail];
  75 
  76         if (trcm->stopped)
  77                 return;
  78 
  79         trc->fileno = (u16) fileno;
  80         trc->line = (u16) line;
  81         trc->data.u64 = data;
  82         trc->timestamp = BFA_TRC_TS(trcm);
  83 
  84         trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
  85         if (trcm->tail == trcm->head)
  86                 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
  87 }
  88 
  89 static void
  90 bfa_com_port_attach(struct bfa_s *bfa)
  91 {
  92         struct bfa_port_s       *port = &bfa->modules.port;
  93         struct bfa_mem_dma_s    *port_dma = BFA_MEM_PORT_DMA(bfa);
  94 
  95         bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
  96         bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
  97 }
  98 
  99 /*
 100  * ablk module attach
 101  */
 102 static void
 103 bfa_com_ablk_attach(struct bfa_s *bfa)
 104 {
 105         struct bfa_ablk_s       *ablk = &bfa->modules.ablk;
 106         struct bfa_mem_dma_s    *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
 107 
 108         bfa_ablk_attach(ablk, &bfa->ioc);
 109         bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
 110 }
 111 
 112 static void
 113 bfa_com_cee_attach(struct bfa_s *bfa)
 114 {
 115         struct bfa_cee_s        *cee = &bfa->modules.cee;
 116         struct bfa_mem_dma_s    *cee_dma = BFA_MEM_CEE_DMA(bfa);
 117 
 118         cee->trcmod = bfa->trcmod;
 119         bfa_cee_attach(cee, &bfa->ioc, bfa);
 120         bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
 121 }
 122 
 123 static void
 124 bfa_com_sfp_attach(struct bfa_s *bfa)
 125 {
 126         struct bfa_sfp_s        *sfp = BFA_SFP_MOD(bfa);
 127         struct bfa_mem_dma_s    *sfp_dma = BFA_MEM_SFP_DMA(bfa);
 128 
 129         bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
 130         bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
 131 }
 132 
 133 static void
 134 bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
 135 {
 136         struct bfa_flash_s      *flash = BFA_FLASH(bfa);
 137         struct bfa_mem_dma_s    *flash_dma = BFA_MEM_FLASH_DMA(bfa);
 138 
 139         bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
 140         bfa_flash_memclaim(flash, flash_dma->kva_curp,
 141                            flash_dma->dma_curp, mincfg);
 142 }
 143 
 144 static void
 145 bfa_com_diag_attach(struct bfa_s *bfa)
 146 {
 147         struct bfa_diag_s       *diag = BFA_DIAG_MOD(bfa);
 148         struct bfa_mem_dma_s    *diag_dma = BFA_MEM_DIAG_DMA(bfa);
 149 
 150         bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
 151         bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
 152 }
 153 
 154 static void
 155 bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
 156 {
 157         struct bfa_phy_s        *phy = BFA_PHY(bfa);
 158         struct bfa_mem_dma_s    *phy_dma = BFA_MEM_PHY_DMA(bfa);
 159 
 160         bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
 161         bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
 162 }
 163 
 164 static void
 165 bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
 166 {
 167         struct bfa_fru_s        *fru = BFA_FRU(bfa);
 168         struct bfa_mem_dma_s    *fru_dma = BFA_MEM_FRU_DMA(bfa);
 169 
 170         bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
 171         bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
 172 }
 173 
 174 /*
 175  * BFA IOC FC related definitions
 176  */
 177 
 178 /*
 179  * IOC local definitions
 180  */
 181 #define BFA_IOCFC_TOV           5000    
 182 
 183 enum {
 184         BFA_IOCFC_ACT_NONE      = 0,
 185         BFA_IOCFC_ACT_INIT      = 1,
 186         BFA_IOCFC_ACT_STOP      = 2,
 187         BFA_IOCFC_ACT_DISABLE   = 3,
 188         BFA_IOCFC_ACT_ENABLE    = 4,
 189 };
 190 
 191 #define DEF_CFG_NUM_FABRICS             1
 192 #define DEF_CFG_NUM_LPORTS              256
 193 #define DEF_CFG_NUM_CQS                 4
 194 #define DEF_CFG_NUM_IOIM_REQS           (BFA_IOIM_MAX)
 195 #define DEF_CFG_NUM_TSKIM_REQS          128
 196 #define DEF_CFG_NUM_FCXP_REQS           64
 197 #define DEF_CFG_NUM_UF_BUFS             64
 198 #define DEF_CFG_NUM_RPORTS              1024
 199 #define DEF_CFG_NUM_ITNIMS              (DEF_CFG_NUM_RPORTS)
 200 #define DEF_CFG_NUM_TINS                256
 201 
 202 #define DEF_CFG_NUM_SGPGS               2048
 203 #define DEF_CFG_NUM_REQQ_ELEMS          256
 204 #define DEF_CFG_NUM_RSPQ_ELEMS          64
 205 #define DEF_CFG_NUM_SBOOT_TGTS          16
 206 #define DEF_CFG_NUM_SBOOT_LUNS          16
 207 
 208 /*
 209  * IOCFC state machine: per-state entry functions and event handlers
 210  */
 211 bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
 212 bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
 213 bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
 214 bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
 215                    struct bfa_iocfc_s, enum iocfc_event);
 216 bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
 217                    struct bfa_iocfc_s, enum iocfc_event);
 218 bfa_fsm_state_decl(bfa_iocfc, operational,
 219                    struct bfa_iocfc_s, enum iocfc_event);
 220 bfa_fsm_state_decl(bfa_iocfc, dconf_write,
 221                    struct bfa_iocfc_s, enum iocfc_event);
 222 bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
 223 bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
 224 bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
 225 bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
 226 bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
 227 bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
 228 bfa_fsm_state_decl(bfa_iocfc, init_failed,
 229                    struct bfa_iocfc_s, enum iocfc_event);
 230 
 231 /*
 232  * forward declarations for IOCFC functions
 233  */
 234 static void bfa_iocfc_start_submod(struct bfa_s *bfa);
 235 static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
 236 static void bfa_iocfc_send_cfg(void *bfa_arg);
 237 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
 238 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
 239 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
 240 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 241 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
 242 static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
 243 static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
 244 static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
 245 static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
 246 
 247 static void
 248 bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
 249 {
 250 }
 251 
 252 static void
 253 bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 254 {
 255         bfa_trc(iocfc->bfa, event);
 256 
 257         switch (event) {
 258         case IOCFC_E_INIT:
 259         case IOCFC_E_ENABLE:
 260                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
 261                 break;
 262         default:
 263                 bfa_sm_fault(iocfc->bfa, event);
 264                 break;
 265         }
 266 }
 267 
 268 static void
 269 bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
 270 {
 271         bfa_ioc_enable(&iocfc->bfa->ioc);
 272 }
 273 
 274 static void
 275 bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 276 {
 277         bfa_trc(iocfc->bfa, event);
 278 
 279         switch (event) {
 280         case IOCFC_E_IOC_ENABLED:
 281                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
 282                 break;
 283 
 284         case IOCFC_E_DISABLE:
 285                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 286                 break;
 287 
 288         case IOCFC_E_STOP:
 289                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 290                 break;
 291 
 292         case IOCFC_E_IOC_FAILED:
 293                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 294                 break;
 295         default:
 296                 bfa_sm_fault(iocfc->bfa, event);
 297                 break;
 298         }
 299 }
 300 
 301 static void
 302 bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
 303 {
 304         bfa_dconf_modinit(iocfc->bfa);
 305 }
 306 
 307 static void
 308 bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 309 {
 310         bfa_trc(iocfc->bfa, event);
 311 
 312         switch (event) {
 313         case IOCFC_E_DCONF_DONE:
 314                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
 315                 break;
 316 
 317         case IOCFC_E_DISABLE:
 318                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 319                 break;
 320 
 321         case IOCFC_E_STOP:
 322                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 323                 break;
 324 
 325         case IOCFC_E_IOC_FAILED:
 326                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 327                 break;
 328         default:
 329                 bfa_sm_fault(iocfc->bfa, event);
 330                 break;
 331         }
 332 }
 333 
 334 static void
 335 bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
 336 {
 337         bfa_iocfc_send_cfg(iocfc->bfa);
 338 }
 339 
 340 static void
 341 bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 342 {
 343         bfa_trc(iocfc->bfa, event);
 344 
 345         switch (event) {
 346         case IOCFC_E_CFG_DONE:
 347                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
 348                 break;
 349 
 350         case IOCFC_E_DISABLE:
 351                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 352                 break;
 353 
 354         case IOCFC_E_STOP:
 355                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 356                 break;
 357 
 358         case IOCFC_E_IOC_FAILED:
 359                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
 360                 break;
 361         default:
 362                 bfa_sm_fault(iocfc->bfa, event);
 363                 break;
 364         }
 365 }
 366 
 367 static void
 368 bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
 369 {
 370         iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
 371         bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
 372                      bfa_iocfc_init_cb, iocfc->bfa);
 373 }
 374 
 375 static void
 376 bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 377 {
 378         bfa_trc(iocfc->bfa, event);
 379 
 380         switch (event) {
 381         case IOCFC_E_START:
 382                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
 383                 break;
 384         case IOCFC_E_STOP:
 385                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 386                 break;
 387         case IOCFC_E_DISABLE:
 388                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 389                 break;
 390         case IOCFC_E_IOC_FAILED:
 391                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 392                 break;
 393         default:
 394                 bfa_sm_fault(iocfc->bfa, event);
 395                 break;
 396         }
 397 }
 398 
 399 static void
 400 bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
 401 {
 402         bfa_fcport_init(iocfc->bfa);
 403         bfa_iocfc_start_submod(iocfc->bfa);
 404 }
 405 
 406 static void
 407 bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 408 {
 409         bfa_trc(iocfc->bfa, event);
 410 
 411         switch (event) {
 412         case IOCFC_E_STOP:
 413                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
 414                 break;
 415         case IOCFC_E_DISABLE:
 416                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 417                 break;
 418         case IOCFC_E_IOC_FAILED:
 419                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 420                 break;
 421         default:
 422                 bfa_sm_fault(iocfc->bfa, event);
 423                 break;
 424         }
 425 }
 426 
 427 static void
 428 bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
 429 {
 430         bfa_dconf_modexit(iocfc->bfa);
 431 }
 432 
 433 static void
 434 bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 435 {
 436         bfa_trc(iocfc->bfa, event);
 437 
 438         switch (event) {
 439         case IOCFC_E_DCONF_DONE:
 440         case IOCFC_E_IOC_FAILED:
 441                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 442                 break;
 443         default:
 444                 bfa_sm_fault(iocfc->bfa, event);
 445                 break;
 446         }
 447 }
 448 
 449 static void
 450 bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
 451 {
 452         bfa_ioc_disable(&iocfc->bfa->ioc);
 453 }
 454 
 455 static void
 456 bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 457 {
 458         bfa_trc(iocfc->bfa, event);
 459 
 460         switch (event) {
 461         case IOCFC_E_IOC_DISABLED:
 462                 bfa_isr_disable(iocfc->bfa);
 463                 bfa_iocfc_disable_submod(iocfc->bfa);
 464                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
 465                 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
 466                 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
 467                              bfa_iocfc_stop_cb, iocfc->bfa);
 468                 break;
 469 
 470         case IOCFC_E_IOC_ENABLED:
 471         case IOCFC_E_DCONF_DONE:
 472         case IOCFC_E_CFG_DONE:
 473                 break;
 474 
 475         default:
 476                 bfa_sm_fault(iocfc->bfa, event);
 477                 break;
 478         }
 479 }
 480 
 481 static void
 482 bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
 483 {
 484         bfa_ioc_enable(&iocfc->bfa->ioc);
 485 }
 486 
 487 static void
 488 bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 489 {
 490         bfa_trc(iocfc->bfa, event);
 491 
 492         switch (event) {
 493         case IOCFC_E_IOC_ENABLED:
 494                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
 495                 break;
 496 
 497         case IOCFC_E_DISABLE:
 498                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 499                 break;
 500 
 501         case IOCFC_E_STOP:
 502                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
 503                 break;
 504 
 505         case IOCFC_E_IOC_FAILED:
 506                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 507 
 508                 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
 509                         break;
 510 
 511                 iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
 512                 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
 513                              bfa_iocfc_enable_cb, iocfc->bfa);
 514                 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
 515                 break;
 516         default:
 517                 bfa_sm_fault(iocfc->bfa, event);
 518                 break;
 519         }
 520 }
 521 
 522 static void
 523 bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
 524 {
 525         bfa_iocfc_send_cfg(iocfc->bfa);
 526 }
 527 
 528 static void
 529 bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 530 {
 531         bfa_trc(iocfc->bfa, event);
 532 
 533         switch (event) {
 534         case IOCFC_E_CFG_DONE:
 535                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
 536                 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
 537                         break;
 538 
 539                 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
 540                 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
 541                              bfa_iocfc_enable_cb, iocfc->bfa);
 542                 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
 543                 break;
 544         case IOCFC_E_DISABLE:
 545                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 546                 break;
 547 
 548         case IOCFC_E_STOP:
 549                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
 550                 break;
 551         case IOCFC_E_IOC_FAILED:
 552                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
 553                 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
 554                         break;
 555 
 556                 iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
 557                 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
 558                              bfa_iocfc_enable_cb, iocfc->bfa);
 559                 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
 560                 break;
 561         default:
 562                 bfa_sm_fault(iocfc->bfa, event);
 563                 break;
 564         }
 565 }
 566 
 567 static void
 568 bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
 569 {
 570         bfa_ioc_disable(&iocfc->bfa->ioc);
 571 }
 572 
 573 static void
 574 bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 575 {
 576         bfa_trc(iocfc->bfa, event);
 577 
 578         switch (event) {
 579         case IOCFC_E_IOC_DISABLED:
 580                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
 581                 break;
 582         case IOCFC_E_IOC_ENABLED:
 583         case IOCFC_E_DCONF_DONE:
 584         case IOCFC_E_CFG_DONE:
 585                 break;
 586         default:
 587                 bfa_sm_fault(iocfc->bfa, event);
 588                 break;
 589         }
 590 }
 591 
 592 static void
 593 bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
 594 {
 595         bfa_isr_disable(iocfc->bfa);
 596         bfa_iocfc_disable_submod(iocfc->bfa);
 597         iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
 598         bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
 599                      bfa_iocfc_disable_cb, iocfc->bfa);
 600 }
 601 
 602 static void
 603 bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 604 {
 605         bfa_trc(iocfc->bfa, event);
 606 
 607         switch (event) {
 608         case IOCFC_E_STOP:
 609                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
 610                 break;
 611         case IOCFC_E_ENABLE:
 612                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
 613                 break;
 614         default:
 615                 bfa_sm_fault(iocfc->bfa, event);
 616                 break;
 617         }
 618 }
 619 
 620 static void
 621 bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
 622 {
 623         bfa_isr_disable(iocfc->bfa);
 624         bfa_iocfc_disable_submod(iocfc->bfa);
 625 }
 626 
 627 static void
 628 bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 629 {
 630         bfa_trc(iocfc->bfa, event);
 631 
 632         switch (event) {
 633         case IOCFC_E_STOP:
 634                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
 635                 break;
 636         case IOCFC_E_DISABLE:
 637                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
 638                 break;
 639         case IOCFC_E_IOC_ENABLED:
 640                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
 641                 break;
 642         case IOCFC_E_IOC_FAILED:
 643                 break;
 644         default:
 645                 bfa_sm_fault(iocfc->bfa, event);
 646                 break;
 647         }
 648 }
 649 
 650 static void
 651 bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
 652 {
 653         bfa_isr_disable(iocfc->bfa);
 654         iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
 655         bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
 656                      bfa_iocfc_init_cb, iocfc->bfa);
 657 }
 658 
 659 static void
 660 bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
 661 {
 662         bfa_trc(iocfc->bfa, event);
 663 
 664         switch (event) {
 665         case IOCFC_E_STOP:
 666                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
 667                 break;
 668         case IOCFC_E_DISABLE:
 669                 bfa_ioc_disable(&iocfc->bfa->ioc);
 670                 break;
 671         case IOCFC_E_IOC_ENABLED:
 672                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
 673                 break;
 674         case IOCFC_E_IOC_DISABLED:
 675                 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
 676                 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
 677                 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
 678                              bfa_iocfc_disable_cb, iocfc->bfa);
 679                 break;
 680         case IOCFC_E_IOC_FAILED:
 681                 break;
 682         default:
 683                 bfa_sm_fault(iocfc->bfa, event);
 684                 break;
 685         }
 686 }
 687 
 688 /*
 689  * BFA Interrupt handling functions
 690  */
 691 static void
 692 bfa_reqq_resume(struct bfa_s *bfa, int qid)
 693 {
 694         struct list_head *waitq, *qe, *qen;
 695         struct bfa_reqq_wait_s *wqe;
 696 
 697         waitq = bfa_reqq(bfa, qid);
 698         list_for_each_safe(qe, qen, waitq) {
 699                 /*
 700                  * Continue only if the request queue is not full.
 701                  */
 702                 if (bfa_reqq_full(bfa, qid))
 703                         break;
 704 
 705                 list_del(qe);
 706                 wqe = (struct bfa_reqq_wait_s *) qe;
 707                 wqe->qresume(wqe->cbarg);
 708         }
 709 }
 710 
 711 bfa_boolean_t
 712 bfa_isr_rspq(struct bfa_s *bfa, int qid)
 713 {
 714         struct bfi_msg_s *m;
 715         u32     pi, ci;
 716         struct list_head *waitq;
 717         bfa_boolean_t ret;
 718 
 719         ci = bfa_rspq_ci(bfa, qid);
 720         pi = bfa_rspq_pi(bfa, qid);
 721 
 722         ret = (ci != pi);
 723 
 724         while (ci != pi) {
 725                 m = bfa_rspq_elem(bfa, qid, ci);
 726                 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
 727 
 728                 bfa_isrs[m->mhdr.msg_class] (bfa, m);
 729                 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
 730         }
 731 
 732         /*
 733          * acknowledge RME completions and update CI
 734          */
 735         bfa_isr_rspq_ack(bfa, qid, ci);
 736 
 737         /*
 738          * Resume any pending requests waiting for request queue space.
 739          */
 740         waitq = bfa_reqq(bfa, qid);
 741         if (!list_empty(waitq))
 742                 bfa_reqq_resume(bfa, qid);
 743 
 744         return ret;
 745 }
 746 
 747 static inline void
 748 bfa_isr_reqq(struct bfa_s *bfa, int qid)
 749 {
 750         struct list_head *waitq;
 751 
 752         bfa_isr_reqq_ack(bfa, qid);
 753 
 754         /*
 755          * Resume any pending requests waiting for request queue space.
 756          */
 757         waitq = bfa_reqq(bfa, qid);
 758         if (!list_empty(waitq))
 759                 bfa_reqq_resume(bfa, qid);
 760 }
 761 
 762 void
 763 bfa_msix_all(struct bfa_s *bfa, int vec)
 764 {
 765         u32     intr, qintr;
 766         int     queue;
 767 
 768         intr = readl(bfa->iocfc.bfa_regs.intr_status);
 769         if (!intr)
 770                 return;
 771 
 772         /*
 773          * RME completion queue interrupt
 774          */
 775         qintr = intr & __HFN_INT_RME_MASK;
 776         if (qintr && bfa->queue_process) {
 777                 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
 778                         bfa_isr_rspq(bfa, queue);
 779         }
 780 
 781         intr &= ~qintr;
 782         if (!intr)
 783                 return;
 784 
 785         /*
 786          * CPE completion queue interrupt
 787          */
 788         qintr = intr & __HFN_INT_CPE_MASK;
 789         if (qintr && bfa->queue_process) {
 790                 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
 791                         bfa_isr_reqq(bfa, queue);
 792         }
 793         intr &= ~qintr;
 794         if (!intr)
 795                 return;
 796 
 797         bfa_msix_lpu_err(bfa, intr);
 798 }
 799 
 800 bfa_boolean_t
 801 bfa_intx(struct bfa_s *bfa)
 802 {
 803         u32 intr, qintr;
 804         int queue;
 805         bfa_boolean_t rspq_comp = BFA_FALSE;
 806 
 807         intr = readl(bfa->iocfc.bfa_regs.intr_status);
 808 
 809         qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
 810         if (qintr)
 811                 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 812 
 813         /*
 814          * Unconditional RME completion queue interrupt
 815          */
 816         if (bfa->queue_process) {
 817                 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
 818                         if (bfa_isr_rspq(bfa, queue))
 819                                 rspq_comp = BFA_TRUE;
 820         }
 821 
 822         if (!intr)
 823                 return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
 824 
 825         /*
 826          * CPE completion queue interrupt
 827          */
 828         qintr = intr & __HFN_INT_CPE_MASK;
 829         if (qintr && bfa->queue_process) {
 830                 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
 831                         bfa_isr_reqq(bfa, queue);
 832         }
 833         intr &= ~qintr;
 834         if (!intr)
 835                 return BFA_TRUE;
 836 
 837         if (bfa->intr_enabled)
 838                 bfa_msix_lpu_err(bfa, intr);
 839 
 840         return BFA_TRUE;
 841 }
 842 
 843 void
 844 bfa_isr_enable(struct bfa_s *bfa)
 845 {
 846         u32 umsk;
 847         int port_id = bfa_ioc_portid(&bfa->ioc);
 848 
 849         bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
 850         bfa_trc(bfa, port_id);
 851 
 852         bfa_msix_ctrl_install(bfa);
 853 
 854         if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
 855                 umsk = __HFN_INT_ERR_MASK_CT2;
 856                 umsk |= port_id == 0 ?
 857                         __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
 858         } else {
 859                 umsk = __HFN_INT_ERR_MASK;
 860                 umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
 861         }
 862 
 863         writel(umsk, bfa->iocfc.bfa_regs.intr_status);
 864         writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
 865         bfa->iocfc.intr_mask = ~umsk;
 866         bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 867 
 868         /*
 869          * Set the flag indicating successful enabling of interrupts
 870          */
 871         bfa->intr_enabled = BFA_TRUE;
 872 }
 873 
 874 void
 875 bfa_isr_disable(struct bfa_s *bfa)
 876 {
 877         bfa->intr_enabled = BFA_FALSE;
 878         bfa_isr_mode_set(bfa, BFA_FALSE);
 879         writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
 880         bfa_msix_uninstall(bfa);
 881 }
 882 
 883 void
 884 bfa_msix_reqq(struct bfa_s *bfa, int vec)
 885 {
 886         bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
 887 }
 888 
 889 void
 890 bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 891 {
 892         bfa_trc(bfa, m->mhdr.msg_class);
 893         bfa_trc(bfa, m->mhdr.msg_id);
 894         bfa_trc(bfa, m->mhdr.mtag.i2htok);
 895         WARN_ON(1);
 896         bfa_trc_stop(bfa->trcmod);
 897 }
 898 
 899 void
 900 bfa_msix_rspq(struct bfa_s *bfa, int vec)
 901 {
 902         bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
 903 }
 904 
 905 void
 906 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 907 {
 908         u32 intr, curr_value;
 909         bfa_boolean_t lpu_isr, halt_isr, pss_isr;
 910 
 911         intr = readl(bfa->iocfc.bfa_regs.intr_status);
 912 
 913         if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
 914                 halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
 915                 pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
 916                 lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
 917                                    __HFN_INT_MBOX_LPU1_CT2);
 918                 intr    &= __HFN_INT_ERR_MASK_CT2;
 919         } else {
 920                 halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
 921                                           (intr & __HFN_INT_LL_HALT) : 0;
 922                 pss_isr  = intr & __HFN_INT_ERR_PSS;
 923                 lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
 924                 intr    &= __HFN_INT_ERR_MASK;
 925         }
 926 
 927         if (lpu_isr)
 928                 bfa_ioc_mbox_isr(&bfa->ioc);
 929 
 930         if (intr) {
 931                 if (halt_isr) {
 932                         /*
 933                          * If the LL_HALT bit is set, the FW Init Halt LL Port
 934                          * register must be cleared as well so that the
 935                          * interrupt status register gets cleared.
 936                          */
 937                         curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
 938                         curr_value &= ~__FW_INIT_HALT_P;
 939                         writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
 940                 }
 941 
 942                 if (pss_isr) {
 943                         /*
 944                          * The ERR_PSS bit needs to be cleared as well in case
 945                          * interrupts are shared, so the driver's interrupt
 946                          * handler is still called even when it is masked out.
 947                          */
 948                         curr_value = readl(
 949                                         bfa->ioc.ioc_regs.pss_err_status_reg);
 950                         writel(curr_value,
 951                                 bfa->ioc.ioc_regs.pss_err_status_reg);
 952                 }
 953 
 954                 writel(intr, bfa->iocfc.bfa_regs.intr_status);
 955                 bfa_ioc_error_isr(&bfa->ioc);
 956         }
 957 }
 958 
 959 /*
 960  * BFA IOC FC related functions
 961  */
 962 
 963 /*
 964  * BFA IOC private functions
 965  */
 966 
 967 /*
 968  * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 969  */
 970 static void
 971 bfa_iocfc_send_cfg(void *bfa_arg)
 972 {
 973         struct bfa_s *bfa = bfa_arg;
 974         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
 975         struct bfi_iocfc_cfg_req_s cfg_req;
 976         struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
 977         struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
 978         int             i;
 979 
 980         WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
 981         bfa_trc(bfa, cfg->fwcfg.num_cqs);
 982 
 983         bfa_iocfc_reset_queues(bfa);
 984 
 985         /*
 986          * initialize IOC configuration info
 987          */
 988         cfg_info->single_msix_vec = 0;
 989         if (bfa->msix.nvecs == 1)
 990                 cfg_info->single_msix_vec = 1;
 991         cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
 992         cfg_info->num_cqs = cfg->fwcfg.num_cqs;
 993         cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
 994                                                cfg->fwcfg.num_ioim_reqs));
 995         cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
 996 
 997         bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
 998         /*
 999          * dma map REQ and RSP circular queues and shadow pointers
1000          */
1001         for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
1002                 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
1003                                     iocfc->req_cq_ba[i].pa);
1004                 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
1005                                     iocfc->req_cq_shadow_ci[i].pa);
1006                 cfg_info->req_cq_elems[i] =
1007                         cpu_to_be16(cfg->drvcfg.num_reqq_elems);
1008 
1009                 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
1010                                     iocfc->rsp_cq_ba[i].pa);
1011                 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
1012                                     iocfc->rsp_cq_shadow_pi[i].pa);
1013                 cfg_info->rsp_cq_elems[i] =
1014                         cpu_to_be16(cfg->drvcfg.num_rspq_elems);
1015         }
1016 
1017         /*
1018          * Enable interrupt coalescing if it is the driver init path
1019          * and not the ioc disable/enable path.
1020          */
1021         if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
1022                 cfg_info->intr_attr.coalesce = BFA_TRUE;
1023 
1024         /*
1025          * dma map IOC configuration itself
1026          */
1027         bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
1028                     bfa_fn_lpu(bfa));
1029         bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
1030 
1031         bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
1032                           sizeof(struct bfi_iocfc_cfg_req_s));
1033 }
1034 
1035 static void
1036 bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1037                    struct bfa_pcidev_s *pcidev)
1038 {
1039         struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1040 
1041         bfa->bfad = bfad;
1042         iocfc->bfa = bfa;
1043         iocfc->cfg = *cfg;
1044 
1045         /*
1046          * Initialize chip specific handlers.
1047          */
1048         if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
1049                 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
1050                 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
1051                 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
1052                 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
1053                 iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
1054                 iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
1055                 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
1056                 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
1057                 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
1058                 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
1059                 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
1060                 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
1061         } else {
1062                 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
1063                 iocfc->hwif.hw_reqq_ack = NULL;
1064                 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
1065                 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
1066                 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
1067                 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
1068                 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
1069                 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
1070                 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
1071                 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
1072                 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
1073                         bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
1074                 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
1075                         bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
1076         }
1077 
1078         if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
1079                 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
1080                 iocfc->hwif.hw_isr_mode_set = NULL;
1081                 iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
1082         }
1083 
1084         iocfc->hwif.hw_reginit(bfa);
1085         bfa->msix.nvecs = 0;
1086 }
1087 
1088 static void
1089 bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
1090 {
1091         u8      *dm_kva = NULL;
1092         u64     dm_pa = 0;
1093         int     i, per_reqq_sz, per_rspq_sz;
1094         struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
1095         struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1096         struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
1097         struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
1098 
1099         /* Claim DMA-able memory for the IOC */
1100         bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
1101                         bfa_mem_dma_phys(ioc_dma));
1102 
1103         /* Claim DMA-able memory for the request/response queues */
1104         per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
1105                                 BFA_DMA_ALIGN_SZ);
1106         per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
1107                                 BFA_DMA_ALIGN_SZ);
1108 
1109         for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
1110                 reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
1111                 iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
1112                 iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
1113                 memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
1114 
1115                 rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
1116                 iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
1117                 iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
1118                 memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
1119         }
1120 
1121         /* Claim IOCFC dma memory - for shadow CI/PI */
1122         dm_kva = bfa_mem_dma_virt(iocfc_dma);
1123         dm_pa  = bfa_mem_dma_phys(iocfc_dma);
1124 
1125         for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
1126                 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
1127                 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
1128                 dm_kva += BFA_CACHELINE_SZ;
1129                 dm_pa += BFA_CACHELINE_SZ;
1130 
1131                 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
1132                 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
1133                 dm_kva += BFA_CACHELINE_SZ;
1134                 dm_pa += BFA_CACHELINE_SZ;
1135         }
1136 
1137         /* Claim IOCFC dma memory - for the config info page */
1138         bfa->iocfc.cfg_info.kva = dm_kva;
1139         bfa->iocfc.cfg_info.pa = dm_pa;
1140         bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
1141         dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1142         dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1143 
1144         /* Claim IOCFC dma memory - for the config response */
1145         bfa->iocfc.cfgrsp_dma.kva = dm_kva;
1146         bfa->iocfc.cfgrsp_dma.pa = dm_pa;
1147         bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
1148         dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1149                         BFA_CACHELINE_SZ);
1150         dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1151                         BFA_CACHELINE_SZ);
1152 
1153         /* Claim IOCFC kva memory - for the firmware trace buffer */
1154         bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
1155         bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
1156 }
1157 
1158 /*
1159  * Start BFA submodules.
1160  */
1161 static void
1162 bfa_iocfc_start_submod(struct bfa_s *bfa)
1163 {
1164         int             i;
1165 
1166         bfa->queue_process = BFA_TRUE;
1167         for (i = 0; i < BFI_IOC_MAX_CQS; i++)
1168                 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
1169 
1170         bfa_fcport_start(bfa);
1171         bfa_uf_start(bfa);
1172         /*
1173          * bfa_init() with flash read is complete; now invalidate the
1174          * stale content of the lun mask (unit attention, rp and lp tags).
1175          */
1176         bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa);
1177 
1178         bfa->iocfc.submod_enabled = BFA_TRUE;
1179 }
1180 
1181 /*
1182  * Disable BFA submodules.
1183  */
1184 static void
1185 bfa_iocfc_disable_submod(struct bfa_s *bfa)
1186 {
1187         if (bfa->iocfc.submod_enabled == BFA_FALSE)
1188                 return;
1189 
1190         bfa_fcdiag_iocdisable(bfa);
1191         bfa_fcport_iocdisable(bfa);
1192         bfa_fcxp_iocdisable(bfa);
1193         bfa_lps_iocdisable(bfa);
1194         bfa_rport_iocdisable(bfa);
1195         bfa_fcp_iocdisable(bfa);
1196         bfa_dconf_iocdisable(bfa);
1197 
1198         bfa->iocfc.submod_enabled = BFA_FALSE;
1199 }
1200 
1201 static void
1202 bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
1203 {
1204         struct bfa_s    *bfa = bfa_arg;
1205 
1206         if (complete)
1207                 bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
1208 }
1209 
1210 static void
1211 bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
1212 {
1213         struct bfa_s  *bfa = bfa_arg;
1214         struct bfad_s *bfad = bfa->bfad;
1215 
1216         if (compl)
1217                 complete(&bfad->comp);
1218 }
1219 
1220 static void
1221 bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
1222 {
1223         struct bfa_s    *bfa = bfa_arg;
1224         struct bfad_s *bfad = bfa->bfad;
1225 
1226         if (compl)
1227                 complete(&bfad->enable_comp);
1228 }
1229 
1230 static void
1231 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
1232 {
1233         struct bfa_s  *bfa = bfa_arg;
1234         struct bfad_s *bfad = bfa->bfad;
1235 
1236         if (compl)
1237                 complete(&bfad->disable_comp);
1238 }
1239 
1240 /*
1241  * Configure queue registers from the firmware response.
1242  */
1243 static void
1244 bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
1245 {
1246         int     i;
1247         struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
1248         void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
1249 
1250         for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
1251                 bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
1252                 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
1253                 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
1254                 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
1255                 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
1256                 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
1257                 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
1258         }
1259 }
1260 
1261 static void
1262 bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
1263 {
1264         struct bfa_iocfc_s      *iocfc   = &bfa->iocfc;
1265         struct bfi_iocfc_cfg_s  *cfg_info = iocfc->cfginfo;
1266 
1267         bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
1268         bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
1269         bfa_rport_res_recfg(bfa, fwcfg->num_rports);
1270         bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
1271                           fwcfg->num_ioim_reqs);
1272         bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
1273 }
1274 
1275 /*
1276  * Update BFA configuration from firmware configuration.
1277  */
1278 static void
1279 bfa_iocfc_cfgrsp(struct bfa_s *bfa)
1280 {
1281         struct bfa_iocfc_s              *iocfc   = &bfa->iocfc;
1282         struct bfi_iocfc_cfgrsp_s       *cfgrsp  = iocfc->cfgrsp;
1283         struct bfa_iocfc_fwcfg_s        *fwcfg   = &cfgrsp->fwcfg;
1284 
1285         fwcfg->num_cqs        = fwcfg->num_cqs;
1286         fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
1287         fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
1288         fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
1289         fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
1290         fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
1291         fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
1292 
1293         /*
1294          * configure queue register offsets as learnt from firmware
1295          */
1296         bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
1297 
1298         /*
1299          * Re-configure resources as learnt from firmware
1300          */
1301         bfa_iocfc_res_recfg(bfa, fwcfg);
1302 
1303         /*
1304          * Install MSIX queue handlers
1305          */
1306         bfa_msix_queue_install(bfa);
1307 
1308         if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
1309                 bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
1310                 bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
1311                 bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
1312         }
1313 }
1314 
1315 void
1316 bfa_iocfc_reset_queues(struct bfa_s *bfa)
1317 {
1318         int             q;
1319 
1320         for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
1321                 bfa_reqq_ci(bfa, q) = 0;
1322                 bfa_reqq_pi(bfa, q) = 0;
1323                 bfa_rspq_ci(bfa, q) = 0;
1324                 bfa_rspq_pi(bfa, q) = 0;
1325         }
1326 }
1327 
1328 /*
1329  * Process FAA pwwn msg from fw.
1330  */
1331 static void
1332 bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
1333 {
1334         struct bfa_iocfc_s              *iocfc   = &bfa->iocfc;
1335         struct bfi_iocfc_cfgrsp_s       *cfgrsp  = iocfc->cfgrsp;
1336 
1337         cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
1338         cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
1339 
1340         bfa->ioc.attr->pwwn = msg->pwwn;
1341         bfa->ioc.attr->nwwn = msg->nwwn;
1342         bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
1343 }
1344 
1345 
1346 /*
1347  * Validate an FAA request: FAA is supported only on FC (non-mezzanine)
1348  * adapters and only when the IOC is operational.
1349  */
1350 static bfa_status_t
1351 bfa_faa_validate_request(struct bfa_s *bfa)
1352 {
1353         enum bfa_ioc_type_e     ioc_type = bfa_get_type(bfa);
1354         u32     card_type = bfa->ioc.attr->card_type;
1355 
1356         if (bfa_ioc_is_operational(&bfa->ioc)) {
1357                 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
1358                         return BFA_STATUS_FEATURE_NOT_SUPPORTED;
1359         } else {
1360                 return BFA_STATUS_IOC_NON_OP;
1361         }
1362 
1363         return BFA_STATUS_OK;
1364 }
1365 
1366 bfa_status_t
1367 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
1368                 bfa_cb_iocfc_t cbfn, void *cbarg)
1369 {
1370         struct bfi_faa_query_s  faa_attr_req;
1371         struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1372         bfa_status_t            status;
1373 
1374         status = bfa_faa_validate_request(bfa);
1375         if (status != BFA_STATUS_OK)
1376                 return status;
1377 
1378         if (iocfc->faa_args.busy == BFA_TRUE)
1379                 return BFA_STATUS_DEVBUSY;
1380 
1381         iocfc->faa_args.faa_attr = attr;
1382         iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
1383         iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
1384 
1385         iocfc->faa_args.busy = BFA_TRUE;
1386         memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
1387         bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
1388                 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
1389 
1390         bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
1391                 sizeof(struct bfi_faa_query_s));
1392 
1393         return BFA_STATUS_OK;
1394 }
1395 
1396 /*
1397  * FAA query response from firmware
1398  */
1399 static void
1400 bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
1401                 bfi_faa_query_rsp_t *rsp)
1402 {
1403         void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
1404 
1405         if (iocfc->faa_args.faa_attr) {
1406                 iocfc->faa_args.faa_attr->faa = rsp->faa;
1407                 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
1408                 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
1409         }
1410 
1411         WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
1412 
1413         iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
1414         iocfc->faa_args.busy = BFA_FALSE;
1415 }
1416 
1417 /*
1418  * IOC enable request is complete
1419  */
1420 static void
1421 bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1422 {
1423         struct bfa_s    *bfa = bfa_arg;
1424 
1425         if (status == BFA_STATUS_OK)
1426                 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
1427         else
1428                 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1429 }
1430 
1431 /*
1432  * IOC disable request is complete
1433  */
1434 static void
1435 bfa_iocfc_disable_cbfn(void *bfa_arg)
1436 {
1437         struct bfa_s    *bfa = bfa_arg;
1438 
1439         bfa->queue_process = BFA_FALSE;
1440         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
1441 }
1442 
1443 /*
1444  * Notify sub-modules of hardware failure.
1445  */
1446 static void
1447 bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1448 {
1449         struct bfa_s    *bfa = bfa_arg;
1450 
1451         bfa->queue_process = BFA_FALSE;
1452         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1453 }
1454 
1455 /*
1456  * Actions on chip-reset completion.
1457  */
1458 static void
1459 bfa_iocfc_reset_cbfn(void *bfa_arg)
1460 {
1461         struct bfa_s    *bfa = bfa_arg;
1462 
1463         bfa_iocfc_reset_queues(bfa);
1464         bfa_isr_enable(bfa);
1465 }
1466 
1467 /*
1468  * Query IOC memory requirement information.
1469  */
1470 void
1471 bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1472                   struct bfa_s *bfa)
1473 {
1474         int q, per_reqq_sz, per_rspq_sz;
1475         struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1476         struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
1477         struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
1478         u32     dm_len = 0;
1479 
1480         /* dma memory for IOC */
1481         bfa_mem_dma_setup(meminfo, ioc_dma,
1482                 BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
1483 
1484         /* dma memory for request/response queues */
1485         per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
1486                                 BFA_DMA_ALIGN_SZ);
1487         per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
1488                                 BFA_DMA_ALIGN_SZ);
1489 
1490         for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
1491                 bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
1492                                 per_reqq_sz);
1493                 bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
1494                                 per_rspq_sz);
1495         }
1496 
1497         /* IOCFC dma memory - shadow CI/PI for each queue */
1498         for (q = 0; q < cfg->fwcfg.num_cqs; q++)
1499                 dm_len += (2 * BFA_CACHELINE_SZ);
1500 
1501         /* IOCFC dma memory - config info and config response */
1502         dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1503         dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1504                         BFA_CACHELINE_SZ);
1505 
1506         /* dma memory for IOCFC */
1507         bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
1508 
1509         /* kva memory for IOCFC - firmware trace buffer */
1510         bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
1511 }
1512 
1513 /*
1514  * IOCFC module attach: initialize the IOC, claim memory and set up queues.
1515  */
1516 void
1517 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1518                  struct bfa_pcidev_s *pcidev)
1519 {
1520         int             i;
1521         struct bfa_ioc_s *ioc = &bfa->ioc;
1522 
1523         bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
1524         bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
1525         bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
1526         bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
1527 
1528         ioc->trcmod = bfa->trcmod;
1529         bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
1530 
1531         bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
1532         bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
1533 
1534         bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
1535         bfa_iocfc_mem_claim(bfa, cfg);
1536         INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
1537 
1538         INIT_LIST_HEAD(&bfa->comp_q);
1539         for (i = 0; i < BFI_IOC_MAX_CQS; i++)
1540                 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
1541 
1542         bfa->iocfc.cb_reqd = BFA_FALSE;
1543         bfa->iocfc.op_status = BFA_STATUS_OK;
1544         bfa->iocfc.submod_enabled = BFA_FALSE;
1545 
1546         bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
1547 }
1548 
1549 /*
1550  * Start the IOCFC initialization state machine.
1551  */
1552 void
1553 bfa_iocfc_init(struct bfa_s *bfa)
1554 {
1555         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
1556 }
1557 
1558 /*
1559  * IOC start called from bfa_start(). Called to start IOC operations
1560  * at driver instantiation for this instance.
1561  */
1562 void
1563 bfa_iocfc_start(struct bfa_s *bfa)
1564 {
1565         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
1566 }
1567 
1568 /*
1569  * IOC stop called from bfa_stop(). Called only when the driver is
1570  * unloaded for this instance.
1571  */
1572 void
1573 bfa_iocfc_stop(struct bfa_s *bfa)
1574 {
1575         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
1576 }
1577 
1578 void
1579 bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1580 {
1581         struct bfa_s            *bfa = bfaarg;
1582         struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1583         union bfi_iocfc_i2h_msg_u       *msg;
1584 
1585         msg = (union bfi_iocfc_i2h_msg_u *) m;
1586         bfa_trc(bfa, msg->mh.msg_id);
1587 
1588         switch (msg->mh.msg_id) {
1589         case BFI_IOCFC_I2H_CFG_REPLY:
1590                 bfa_iocfc_cfgrsp(bfa);
1591                 break;
1592         case BFI_IOCFC_I2H_UPDATEQ_RSP:
1593                 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
1594                 break;
1595         case BFI_IOCFC_I2H_ADDR_MSG:
1596                 bfa_iocfc_process_faa_addr(bfa,
1597                                 (struct bfi_faa_addr_msg_s *)msg);
1598                 break;
1599         case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1600                 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
1601                 break;
1602         default:
1603                 WARN_ON(1);
1604         }
1605 }
1606 
1607 void
1608 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1609 {
1610         struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1611 
1612         attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1613 
1614         attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
1615                                 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1616                                 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
1617 
1618         attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
1619                         be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1620                         be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
1621 
1622         attr->config    = iocfc->cfg;
1623 }
1624 
1625 bfa_status_t
1626 bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
1627 {
1628         struct bfa_iocfc_s              *iocfc = &bfa->iocfc;
1629         struct bfi_iocfc_set_intr_req_s *m;
1630 
1631         iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
1632         iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
1633         iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
1634 
1635         if (!bfa_iocfc_is_operational(bfa))
1636                 return BFA_STATUS_OK;
1637 
1638         m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
1639         if (!m)
1640                 return BFA_STATUS_DEVBUSY;
1641 
1642         bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
1643                     bfa_fn_lpu(bfa));
1644         m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
1645         m->delay    = iocfc->cfginfo->intr_attr.delay;
1646         m->latency  = iocfc->cfginfo->intr_attr.latency;
1647 
1648         bfa_trc(bfa, attr->delay);
1649         bfa_trc(bfa, attr->latency);
1650 
1651         bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
1652         return BFA_STATUS_OK;
1653 }
1654 
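/*
 * Descriptive comment (added): program the DMA base address of a SCSI sense
 * buffer segment in the IOCFC config area.
 */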
1655 void
1656 bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
1657 {
1658         struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1659 
1660         iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
1661         bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
1662 }
1663 
1664 
1665 
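/*
 * Descriptive comment (added): request IOC enable; a driver completion
 * callback is expected (cb_reqd is set).
 */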
1666 void
1667 bfa_iocfc_enable(struct bfa_s *bfa)
1668 {
1669         bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1670                      "IOC Enable");
1671         bfa->iocfc.cb_reqd = BFA_TRUE;
1672         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
1673 }
1674 
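/*
 * Descriptive comment (added): request IOC disable.
 */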
1675 void
1676 bfa_iocfc_disable(struct bfa_s *bfa)
1677 {
1678         bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1679                      "IOC Disable");
1680 
1681         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
1682 }
1683 
1684 bfa_boolean_t
1685 bfa_iocfc_is_operational(struct bfa_s *bfa)
1686 {
1687         return bfa_ioc_is_operational(&bfa->ioc) &&
1688                 bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
1689 }
1690 
1691 
1692 
1693 
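/*
 * Descriptive comment (added): return boot target WWNs, preferring the
 * pre-boot (PBC) configured boot LUNs when the firmware config response
 * carries them.
 */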
1694 void
1695 bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1696 {
1697         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1698         struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1699         int i;
1700 
1701         if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1702                 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1703                 *nwwns = cfgrsp->pbc_cfg.nbluns;
1704                 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1705                         wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1706 
1707                 return;
1708         }
1709 
1710         *nwwns = cfgrsp->bootwwns.nwwns;
1711         memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1712 }
1713 
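/*
 * Descriptive comment (added): copy out the pre-boot configured vports and
 * return their count.
 */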
1714 int
1715 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1716 {
1717         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1718         struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1719 
1720         memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1721         return cfgrsp->pbc_cfg.nvports;
1722 }
1723 
1724 
1725 
1726 
1727 
1728 
1729 
1730 
1731 
1732 
1733 
1734 
1735 
1736 
1737 
1738 
1739 
1740 
1741 
1742 
1743 
1744 
1745 
1746 
1747 
1748 
1749 
1750 
1751 
1752 
1753 
1754 
1755 
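/*
 * Descriptive comment (added): compute the DMA and KVA memory required by
 * every BFA sub-module for the given configuration.  The driver allocates
 * these elements and then hands the same meminfo structure to bfa_attach().
 */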
1756 void
1757 bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1758                 struct bfa_s *bfa)
1759 {
1760         struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
1761         struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
1762         struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
1763         struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
1764         struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
1765         struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
1766         struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
1767         struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
1768 
1769         WARN_ON((cfg == NULL) || (meminfo == NULL));
1770 
1771         memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1772 
1773         
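        /* Initialize the DMA and KVA meminfo queues (comment added) */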
1774         INIT_LIST_HEAD(&meminfo->dma_info.qe);
1775         INIT_LIST_HEAD(&meminfo->kva_info.qe);
1776 
1777         bfa_iocfc_meminfo(cfg, meminfo, bfa);
1778         bfa_sgpg_meminfo(cfg, meminfo, bfa);
1779         bfa_fcport_meminfo(cfg, meminfo, bfa);
1780         bfa_fcxp_meminfo(cfg, meminfo, bfa);
1781         bfa_lps_meminfo(cfg, meminfo, bfa);
1782         bfa_uf_meminfo(cfg, meminfo, bfa);
1783         bfa_rport_meminfo(cfg, meminfo, bfa);
1784         bfa_fcp_meminfo(cfg, meminfo, bfa);
1785         bfa_dconf_meminfo(cfg, meminfo, bfa);
1786 
1787         
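        /* Set up DMA memory elements for the IOC common modules (comment added) */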
1788         bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
1789         bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
1790         bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
1791         bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
1792         bfa_mem_dma_setup(meminfo, flash_dma,
1793                           bfa_flash_meminfo(cfg->drvcfg.min_cfg));
1794         bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
1795         bfa_mem_dma_setup(meminfo, phy_dma,
1796                           bfa_phy_meminfo(cfg->drvcfg.min_cfg));
1797         bfa_mem_dma_setup(meminfo, fru_dma,
1798                           bfa_fru_meminfo(cfg->drvcfg.min_cfg));
1799 }
1800 
1801 
1802 
1803 
1804 
1805 
1806 
1807 
1808 
1809 
1810 
1811 
1812 
1813 
1814 
1815 
1816 
1817 
1818 
1819 
1820 
1821 
1822 
1823 
1824 
1825 
1826 
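/*
 * Descriptive comment (added): attach the driver instance with the BFA
 * library.  The memory iterators in meminfo are reset and each sub-module
 * then claims its share during attach.
 *
 * A minimal call-sequence sketch (illustrative only; "bfad", "cfg",
 * "meminfo" and "pcidev" stand for driver-owned objects, and real callers
 * also allocate every DMA/KVA element listed in meminfo between the two
 * steps):
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, &bfa);
 *	... allocate the meminfo DMA/KVA elements ...
 *	bfa_attach(&bfa, bfad, &cfg, &meminfo, &pcidev);
 *	bfa_iocfc_init(&bfa);
 */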
1827 void
1828 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1829                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1830 {
1831         struct bfa_mem_dma_s *dma_info, *dma_elem;
1832         struct bfa_mem_kva_s *kva_info, *kva_elem;
1833         struct list_head *dm_qe, *km_qe;
1834 
1835         bfa->fcs = BFA_FALSE;
1836 
1837         WARN_ON((cfg == NULL) || (meminfo == NULL));
1838 
1839         
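        /* Reset the memory iterators before the sub-modules claim memory (comment added) */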
1840         dma_info = &meminfo->dma_info;
1841         dma_info->kva_curp = dma_info->kva;
1842         dma_info->dma_curp = dma_info->dma;
1843 
1844         kva_info = &meminfo->kva_info;
1845         kva_info->kva_curp = kva_info->kva;
1846 
1847         list_for_each(dm_qe, &dma_info->qe) {
1848                 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
1849                 dma_elem->kva_curp = dma_elem->kva;
1850                 dma_elem->dma_curp = dma_elem->dma;
1851         }
1852 
1853         list_for_each(km_qe, &kva_info->qe) {
1854                 kva_elem = (struct bfa_mem_kva_s *) km_qe;
1855                 kva_elem->kva_curp = kva_elem->kva;
1856         }
1857 
1858         bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
1859         bfa_fcdiag_attach(bfa, bfad, cfg, pcidev);
1860         bfa_sgpg_attach(bfa, bfad, cfg, pcidev);
1861         bfa_fcport_attach(bfa, bfad, cfg, pcidev);
1862         bfa_fcxp_attach(bfa, bfad, cfg, pcidev);
1863         bfa_lps_attach(bfa, bfad, cfg, pcidev);
1864         bfa_uf_attach(bfa, bfad, cfg, pcidev);
1865         bfa_rport_attach(bfa, bfad, cfg, pcidev);
1866         bfa_fcp_attach(bfa, bfad, cfg, pcidev);
1867         bfa_dconf_attach(bfa, bfad, cfg);
1868         bfa_com_port_attach(bfa);
1869         bfa_com_ablk_attach(bfa);
1870         bfa_com_cee_attach(bfa);
1871         bfa_com_sfp_attach(bfa);
1872         bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
1873         bfa_com_diag_attach(bfa);
1874         bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
1875         bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
1876 }
1877 
1878 
1879 
1880 
1881 
1882 
1883 
1884 
1885 
1886 
1887 
1888 
1889 
1890 
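/*
 * Descriptive comment (added): detach the BFA instance by detaching its IOC.
 */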
1891 void
1892 bfa_detach(struct bfa_s *bfa)
1893 {
1894         bfa_ioc_detach(&bfa->ioc);
1895 }
1896 
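/*
 * Descriptive comment (added): move all pending completions from
 * bfa->comp_q onto the caller's list.  Callers typically dequeue under the
 * driver lock and then run bfa_comp_process() on the private list with the
 * lock dropped.
 */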
1897 void
1898 bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
1899 {
1900         INIT_LIST_HEAD(comp_q);
1901         list_splice_tail_init(&bfa->comp_q, comp_q);
1902 }
1903 
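/*
 * Descriptive comment (added): invoke every queued completion callback.
 * Entries marked pre_rmv are dequeued first and called with their saved
 * firmware status.
 */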
1904 void
1905 bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1906 {
1907         struct list_head                *qe;
1908         struct list_head                *qen;
1909         struct bfa_cb_qe_s      *hcb_qe;
1910         bfa_cb_cbfn_status_t    cbfn;
1911 
1912         list_for_each_safe(qe, qen, comp_q) {
1913                 hcb_qe = (struct bfa_cb_qe_s *) qe;
1914                 if (hcb_qe->pre_rmv) {
1915                         
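                        /* dequeue before the callback; qe is not valid afterwards (comment added) */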
1916                         list_del(qe);
1917                         cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
1918                         cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
1919                 } else
1920                         hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1921         }
1922 }
1923 
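/*
 * Descriptive comment (added): release queued completions without
 * completing them; each callback is invoked with BFA_FALSE.
 */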
1924 void
1925 bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1926 {
1927         struct list_head                *qe;
1928         struct bfa_cb_qe_s      *hcb_qe;
1929 
1930         while (!list_empty(comp_q)) {
1931                 bfa_q_deq(comp_q, &qe);
1932                 hcb_qe = (struct bfa_cb_qe_s *) qe;
1933                 WARN_ON(hcb_qe->pre_rmv);
1934                 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1935         }
1936 }
1937 
1938 
1939 
1940 
1941 
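/*
 * Descriptive comment (added): return the table of PCI vendor/device IDs
 * supported by this driver, and its size.
 */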
1942 void
1943 bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1944 {
1945         static struct bfa_pciid_s __pciids[] = {
1946                 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1947                 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1948                 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
1949                 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
1950         };
1951 
1952         *npciids = ARRAY_SIZE(__pciids);
1953         *pciids = __pciids;
1954 }
1955 
1956 
1957 
1958 
1959 
1960 
1961 
1962 
1963 
1964 
1965 
1966 
1967 
1968 
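/*
 * Descriptive comment (added): fill in the default firmware and driver
 * resource configuration.
 */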
1969 void
1970 bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
1971 {
1972         cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
1973         cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
1974         cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
1975         cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
1976         cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
1977         cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
1978         cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
1979         cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
1980         cfg->fwcfg.num_fwtio_reqs = 0;
1981 
1982         cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
1983         cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
1984         cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
1985         cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
1986         cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
1987         cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
1988         cfg->drvcfg.ioc_recover = BFA_FALSE;
1989         cfg->drvcfg.delay_comp = BFA_FALSE;
1990 
1991 }
1992 
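/*
 * Descriptive comment (added): fill in the minimum resource configuration,
 * starting from the defaults, trimming each pool to its minimum size and
 * setting the min_cfg flag.
 */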
1993 void
1994 bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
1995 {
1996         bfa_cfg_get_default(cfg);
1997         cfg->fwcfg.num_ioim_reqs   = BFA_IOIM_MIN;
1998         cfg->fwcfg.num_tskim_reqs  = BFA_TSKIM_MIN;
1999         cfg->fwcfg.num_fcxp_reqs   = BFA_FCXP_MIN;
2000         cfg->fwcfg.num_uf_bufs     = BFA_UF_MIN;
2001         cfg->fwcfg.num_rports      = BFA_RPORT_MIN;
2002         cfg->fwcfg.num_fwtio_reqs = 0;
2003 
2004         cfg->drvcfg.num_sgpgs      = BFA_SGPG_MIN;
2005         cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
2006         cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
2007         cfg->drvcfg.min_cfg        = BFA_TRUE;
2008 }