Lines matching refs:bp in drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
(format: source line number, matching source line, enclosing function; lines where bp is declared are tagged "argument" for a function parameter or "local" for a local variable)

29 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
35 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, in storm_memset_vf_to_pf() argument
38 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
40 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
42 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
44 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
48 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, in storm_memset_func_en() argument
51 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
53 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
55 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
57 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
61 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_idx_by_abs_fid() argument
65 for_each_vf(bp, idx) in bnx2x_vf_idx_by_abs_fid()
66 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) in bnx2x_vf_idx_by_abs_fid()
72 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_by_abs_fid() argument
74 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_abs_fid()
75 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; in bnx2x_vf_by_abs_fid()
78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_igu_ack_sb() argument
102 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); in bnx2x_vf_igu_ack_sb()
108 REG_WR(bp, igu_addr_ctl, ctl); in bnx2x_vf_igu_ack_sb()
113 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, in bnx2x_validate_vf_sp_objs() argument
128 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_tx() argument
144 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_rx() argument
168 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, in bnx2x_vfop_qctor_prep() argument
233 static int bnx2x_vf_queue_create(struct bnx2x *bp, in bnx2x_vf_queue_create() argument
247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == in bnx2x_vf_queue_create()
255 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
262 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), in bnx2x_vf_queue_create()
273 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_destroy() argument
289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == in bnx2x_vf_queue_destroy()
298 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_vf_queue_destroy()
315 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) in bnx2x_vf_set_igu_info() argument
317 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_set_igu_info()
320 if (!BP_VFDB(bp)->first_vf_igu_entry) in bnx2x_vf_set_igu_info()
321 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; in bnx2x_vf_set_igu_info()
330 BP_VFDB(bp)->vf_sbs_pool++; in bnx2x_vf_set_igu_info()
333 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, in bnx2x_vf_vlan_credit() argument
341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); in bnx2x_vf_vlan_credit()
349 bnx2x_vlan_mac_h_read_unlock(bp, obj); in bnx2x_vf_vlan_credit()
354 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_vlan_mac_clear() argument
384 rc = ramrod.vlan_mac_obj->delete_all(bp, in bnx2x_vf_vlan_mac_clear()
398 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, in bnx2x_vf_mac_vlan_config() argument
436 rc = bnx2x_config_vlan_mac(bp, &ramrod); in bnx2x_vf_mac_vlan_config()
450 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mac_vlan_config_list() argument
458 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_vf_mac_vlan_config_list()
463 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
475 bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
487 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, in bnx2x_vf_queue_setup() argument
494 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); in bnx2x_vf_queue_setup()
499 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, in bnx2x_vf_queue_setup()
507 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_flr() argument
516 bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_vf_queue_flr()
517 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
521 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
525 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
540 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_queue_flr()
551 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mcast() argument
579 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); in bnx2x_vf_mcast()
597 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); in bnx2x_vf_mcast()
606 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, in bnx2x_vf_prep_rx_mode() argument
616 ramrod->rx_mode_obj = &bp->rx_mode_obj; in bnx2x_vf_prep_rx_mode()
627 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
628 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
631 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rxmode() argument
638 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); in bnx2x_vf_rxmode()
641 return bnx2x_config_rx_mode(bp, &ramrod); in bnx2x_vf_rxmode()
644 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) in bnx2x_vf_queue_teardown() argument
652 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); in bnx2x_vf_queue_teardown()
657 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { in bnx2x_vf_queue_teardown()
658 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
663 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
668 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
673 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); in bnx2x_vf_queue_teardown()
680 rc = bnx2x_vf_queue_destroy(bp, vf, qid); in bnx2x_vf_queue_teardown()
698 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) in bnx2x_vf_enable_internal() argument
700 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); in bnx2x_vf_enable_internal()
704 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_semi_clear_err() argument
706 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
707 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
708 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
709 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
712 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_pglue_clear_err() argument
714 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; in bnx2x_vf_pglue_clear_err()
731 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); in bnx2x_vf_pglue_clear_err()
734 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_reset() argument
740 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_reset()
742 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); in bnx2x_vf_igu_reset()
743 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); in bnx2x_vf_igu_reset()
744 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); in bnx2x_vf_igu_reset()
745 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); in bnx2x_vf_igu_reset()
746 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); in bnx2x_vf_igu_reset()
747 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); in bnx2x_vf_igu_reset()
749 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_reset()
752 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; in bnx2x_vf_igu_reset()
753 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_reset()
759 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_reset()
766 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); in bnx2x_vf_igu_reset()
769 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, in bnx2x_vf_igu_reset()
773 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, in bnx2x_vf_igu_reset()
778 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_enable_access() argument
781 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); in bnx2x_vf_enable_access()
782 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); in bnx2x_vf_enable_access()
785 bnx2x_vf_semi_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
786 bnx2x_vf_pglue_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
789 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); in bnx2x_vf_enable_access()
791 bnx2x_vf_enable_internal(bp, true); in bnx2x_vf_enable_access()
792 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_access()
795 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_enable_traffic() argument
798 bnx2x_vf_igu_reset(bp, vf); in bnx2x_vf_enable_traffic()
801 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_enable_traffic()
802 REG_WR(bp, PBF_REG_DISABLE_VF, 0); in bnx2x_vf_enable_traffic()
803 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_traffic()
806 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_is_pcie_pending() argument
809 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_is_pcie_pending()
820 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_flr_clnup_epilog() argument
823 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) in bnx2x_vf_flr_clnup_epilog()
833 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_iov_static_resc() argument
852 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free_resc() argument
855 bnx2x_iov_static_resc(bp, vf); in bnx2x_vf_free_resc()
859 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr_clnup_hw() argument
861 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); in bnx2x_vf_flr_clnup_hw()
864 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_flr_clnup_hw()
865 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, in bnx2x_vf_flr_clnup_hw()
868 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_flr_clnup_hw()
871 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), in bnx2x_vf_flr_clnup_hw()
876 bnx2x_tx_hw_flushed(bp, poll_cnt); in bnx2x_vf_flr_clnup_hw()
879 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr() argument
889 rc = bnx2x_vf_queue_flr(bp, vf, i); in bnx2x_vf_flr()
895 bnx2x_vf_mcast(bp, vf, NULL, 0, true); in bnx2x_vf_flr()
898 bnx2x_vf_flr_clnup_hw(bp, vf); in bnx2x_vf_flr()
901 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_flr()
904 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_vf_flr()
911 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) in bnx2x_vf_flr_clnup() argument
916 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { in bnx2x_vf_flr_clnup()
918 if (bnx2x_vf(bp, i, state) != VF_RESET || in bnx2x_vf_flr_clnup()
919 !bnx2x_vf(bp, i, flr_clnup_stage)) in bnx2x_vf_flr_clnup()
923 i, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_flr_clnup()
925 vf = BP_VF(bp, i); in bnx2x_vf_flr_clnup()
928 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
931 bnx2x_vf_flr(bp, vf); in bnx2x_vf_flr_clnup()
935 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
946 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_flr_clnup()
948 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], in bnx2x_vf_flr_clnup()
949 bp->vfdb->flrd_vfs[i]); in bnx2x_vf_flr_clnup()
951 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); in bnx2x_vf_flr_clnup()
957 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); in bnx2x_vf_flr_clnup()
960 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) in bnx2x_vf_handle_flr_event() argument
966 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); in bnx2x_vf_handle_flr_event()
970 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_handle_flr_event()
972 for_each_vf(bp, i) { in bnx2x_vf_handle_flr_event()
973 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_vf_handle_flr_event()
977 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); in bnx2x_vf_handle_flr_event()
979 reset = bp->vfdb->flrd_vfs[1] & in bnx2x_vf_handle_flr_event()
994 bnx2x_vf_flr_clnup(bp); in bnx2x_vf_handle_flr_event()
998 void bnx2x_iov_init_dq(struct bnx2x *bp) in bnx2x_iov_init_dq() argument
1000 if (!IS_SRIOV(bp)) in bnx2x_iov_init_dq()
1004 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); in bnx2x_iov_init_dq()
1005 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); in bnx2x_iov_init_dq()
1010 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); in bnx2x_iov_init_dq()
1013 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); in bnx2x_iov_init_dq()
1018 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); in bnx2x_iov_init_dq()
1024 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); in bnx2x_iov_init_dq()
1025 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); in bnx2x_iov_init_dq()
1026 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); in bnx2x_iov_init_dq()
1027 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); in bnx2x_iov_init_dq()
1032 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); in bnx2x_iov_init_dq()
1035 void bnx2x_iov_init_dmae(struct bnx2x *bp) in bnx2x_iov_init_dmae() argument
1037 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) in bnx2x_iov_init_dmae()
1038 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); in bnx2x_iov_init_dmae()
1041 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) in bnx2x_vf_bus() argument
1043 struct pci_dev *dev = bp->pdev; in bnx2x_vf_bus()
1044 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_bus()
1050 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) in bnx2x_vf_devfn() argument
1052 struct pci_dev *dev = bp->pdev; in bnx2x_vf_devfn()
1053 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_devfn()
1058 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_set_bars() argument
1061 struct pci_dev *dev = bp->pdev; in bnx2x_vf_set_bars()
1062 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_set_bars()
1080 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) in bnx2x_get_vf_igu_cam_info() argument
1088 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); in bnx2x_get_vf_igu_cam_info()
1094 else if (current_pf == BP_FUNC(bp)) in bnx2x_get_vf_igu_cam_info()
1095 bnx2x_vf_set_igu_info(bp, sb_id, in bnx2x_get_vf_igu_cam_info()
1103 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); in bnx2x_get_vf_igu_cam_info()
1104 return BP_VFDB(bp)->vf_sbs_pool; in bnx2x_get_vf_igu_cam_info()
1107 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) in __bnx2x_iov_free_vfdb() argument
1109 if (bp->vfdb) { in __bnx2x_iov_free_vfdb()
1110 kfree(bp->vfdb->vfqs); in __bnx2x_iov_free_vfdb()
1111 kfree(bp->vfdb->vfs); in __bnx2x_iov_free_vfdb()
1112 kfree(bp->vfdb); in __bnx2x_iov_free_vfdb()
1114 bp->vfdb = NULL; in __bnx2x_iov_free_vfdb()
1117 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_pci_cfg_info() argument
1120 struct pci_dev *dev = bp->pdev; in bnx2x_sriov_pci_cfg_info()
1142 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_info() argument
1150 if (bnx2x_sriov_pci_cfg_info(bp, iov)) in bnx2x_sriov_info()
1157 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); in bnx2x_sriov_info()
1159 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); in bnx2x_sriov_info()
1163 BP_FUNC(bp), in bnx2x_sriov_info()
1171 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, in bnx2x_iov_init_one() argument
1176 struct pci_dev *dev = bp->pdev; in bnx2x_iov_init_one()
1178 bp->vfdb = NULL; in bnx2x_iov_init_one()
1181 if (IS_VF(bp)) in bnx2x_iov_init_one()
1189 if (CHIP_IS_E1x(bp)) in bnx2x_iov_init_one()
1197 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { in bnx2x_iov_init_one()
1199 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); in bnx2x_iov_init_one()
1212 if (!bnx2x_ari_enabled(bp->pdev)) { in bnx2x_iov_init_one()
1218 if (CHIP_INT_MODE_IS_BC(bp)) { in bnx2x_iov_init_one()
1224 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); in bnx2x_iov_init_one()
1225 if (!bp->vfdb) { in bnx2x_iov_init_one()
1236 iov = &(bp->vfdb->sriov); in bnx2x_iov_init_one()
1237 err = bnx2x_sriov_info(bp, iov); in bnx2x_iov_init_one()
1251 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * in bnx2x_iov_init_one()
1252 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); in bnx2x_iov_init_one()
1253 if (!bp->vfdb->vfs) { in bnx2x_iov_init_one()
1260 for_each_vf(bp, i) { in bnx2x_iov_init_one()
1261 bnx2x_vf(bp, i, index) = i; in bnx2x_iov_init_one()
1262 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; in bnx2x_iov_init_one()
1263 bnx2x_vf(bp, i, state) = VF_FREE; in bnx2x_iov_init_one()
1264 mutex_init(&bnx2x_vf(bp, i, op_mutex)); in bnx2x_iov_init_one()
1265 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; in bnx2x_iov_init_one()
1269 if (!bnx2x_get_vf_igu_cam_info(bp)) { in bnx2x_iov_init_one()
1276 bp->vfdb->vfqs = kzalloc( in bnx2x_iov_init_one()
1280 if (!bp->vfdb->vfqs) { in bnx2x_iov_init_one()
1287 mutex_init(&bp->vfdb->event_mutex); in bnx2x_iov_init_one()
1289 mutex_init(&bp->vfdb->bulletin_mutex); in bnx2x_iov_init_one()
1291 if (SHMEM2_HAS(bp, sriov_switch_mode)) in bnx2x_iov_init_one()
1292 SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB); in bnx2x_iov_init_one()
1297 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_init_one()
1301 void bnx2x_iov_remove_one(struct bnx2x *bp) in bnx2x_iov_remove_one() argument
1306 if (!IS_SRIOV(bp)) in bnx2x_iov_remove_one()
1309 bnx2x_disable_sriov(bp); in bnx2x_iov_remove_one()
1312 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { in bnx2x_iov_remove_one()
1313 bnx2x_pretend_func(bp, in bnx2x_iov_remove_one()
1314 HW_VF_HANDLE(bp, in bnx2x_iov_remove_one()
1315 bp->vfdb->sriov.first_vf_in_pf + in bnx2x_iov_remove_one()
1318 bp->vfdb->sriov.first_vf_in_pf + vf_idx); in bnx2x_iov_remove_one()
1319 bnx2x_vf_enable_internal(bp, 0); in bnx2x_iov_remove_one()
1320 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_iov_remove_one()
1324 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_remove_one()
1327 void bnx2x_iov_free_mem(struct bnx2x *bp) in bnx2x_iov_free_mem() argument
1331 if (!IS_SRIOV(bp)) in bnx2x_iov_free_mem()
1336 struct hw_dma *cxt = &bp->vfdb->context[i]; in bnx2x_iov_free_mem()
1340 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, in bnx2x_iov_free_mem()
1341 BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_free_mem()
1342 BP_VFDB(bp)->sp_dma.size); in bnx2x_iov_free_mem()
1344 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, in bnx2x_iov_free_mem()
1345 BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1346 BP_VF_MBX_DMA(bp)->size); in bnx2x_iov_free_mem()
1348 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, in bnx2x_iov_free_mem()
1349 BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1350 BP_VF_BULLETIN_DMA(bp)->size); in bnx2x_iov_free_mem()
1353 int bnx2x_iov_alloc_mem(struct bnx2x *bp) in bnx2x_iov_alloc_mem() argument
1358 if (!IS_SRIOV(bp)) in bnx2x_iov_alloc_mem()
1362 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * in bnx2x_iov_alloc_mem()
1366 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_alloc_mem()
1381 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); in bnx2x_iov_alloc_mem()
1382 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_alloc_mem()
1384 if (!BP_VFDB(bp)->sp_dma.addr) in bnx2x_iov_alloc_mem()
1386 BP_VFDB(bp)->sp_dma.size = tot_size; in bnx2x_iov_alloc_mem()
1389 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; in bnx2x_iov_alloc_mem()
1390 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1392 if (!BP_VF_MBX_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1395 BP_VF_MBX_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
1398 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; in bnx2x_iov_alloc_mem()
1399 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1401 if (!BP_VF_BULLETIN_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1404 BP_VF_BULLETIN_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
1412 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfq_init() argument
1423 bnx2x_init_queue_obj(bp, &q->sp_obj, in bnx2x_vfq_init()
1425 bnx2x_vf_sp(bp, vf, q_data), in bnx2x_vfq_init()
1426 bnx2x_vf_sp_map(bp, vf, q_data), in bnx2x_vfq_init()
1437 static int bnx2x_max_speed_cap(struct bnx2x *bp) in bnx2x_max_speed_cap() argument
1439 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; in bnx2x_max_speed_cap()
1448 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) in bnx2x_iov_link_update_vf() argument
1450 struct bnx2x_link_report_data *state = &bp->last_reported_link; in bnx2x_iov_link_update_vf()
1457 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); in bnx2x_iov_link_update_vf()
1461 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1487 bulletin->link_speed = bnx2x_max_speed_cap(bp); in bnx2x_iov_link_update_vf()
1499 rc = bnx2x_post_vf_bulletin(bp, idx); in bnx2x_iov_link_update_vf()
1507 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1513 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_link_state() local
1514 struct bnx2x_virtf *vf = BP_VF(bp, idx); in bnx2x_set_vf_link_state()
1524 return bnx2x_iov_link_update_vf(bp, idx); in bnx2x_set_vf_link_state()
1527 void bnx2x_iov_link_update(struct bnx2x *bp) in bnx2x_iov_link_update() argument
1531 if (!IS_SRIOV(bp)) in bnx2x_iov_link_update()
1534 for_each_vf(bp, vfid) in bnx2x_iov_link_update()
1535 bnx2x_iov_link_update_vf(bp, vfid); in bnx2x_iov_link_update()
1539 int bnx2x_iov_nic_init(struct bnx2x *bp) in bnx2x_iov_nic_init() argument
1543 if (!IS_SRIOV(bp)) { in bnx2x_iov_nic_init()
1548 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); in bnx2x_iov_nic_init()
1554 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1555 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1557 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * in bnx2x_iov_nic_init()
1561 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_iov_nic_init()
1570 bnx2x_iov_static_resc(bp, vf); in bnx2x_iov_nic_init()
1574 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_iov_nic_init()
1587 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, in bnx2x_iov_nic_init()
1589 bnx2x_vf_sp(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1590 bnx2x_vf_sp_map(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1596 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) in bnx2x_iov_nic_init()
1597 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * in bnx2x_iov_nic_init()
1600 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + in bnx2x_iov_nic_init()
1604 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_iov_nic_init()
1608 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1609 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1612 vf->bus = bnx2x_vf_bus(bp, vfid); in bnx2x_iov_nic_init()
1613 vf->devfn = bnx2x_vf_devfn(bp, vfid); in bnx2x_iov_nic_init()
1614 bnx2x_vf_set_bars(bp, vf); in bnx2x_iov_nic_init()
1628 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) in bnx2x_iov_chip_cleanup() argument
1632 if (!IS_SRIOV(bp)) in bnx2x_iov_chip_cleanup()
1636 for_each_vf(bp, i) in bnx2x_iov_chip_cleanup()
1637 bnx2x_vf_release(bp, BP_VF(bp, i)); in bnx2x_iov_chip_cleanup()
1643 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) in bnx2x_iov_init_ilt() argument
1646 struct bnx2x_ilt *ilt = BP_ILT(bp); in bnx2x_iov_init_ilt()
1648 if (!IS_SRIOV(bp)) in bnx2x_iov_init_ilt()
1653 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_init_ilt()
1662 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) in bnx2x_iov_is_vf_cid() argument
1669 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, in bnx2x_vf_handle_classification_eqe() argument
1681 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, in bnx2x_vf_handle_classification_eqe()
1685 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, in bnx2x_vf_handle_classification_eqe()
1700 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, in bnx2x_vf_handle_mcast_eqe() argument
1711 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_vf_handle_mcast_eqe()
1719 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, in bnx2x_vf_handle_filters_eqe() argument
1727 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, in bnx2x_vf_handle_rss_update_eqe() argument
1733 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) in bnx2x_iov_eq_sp_event() argument
1740 if (!IS_SRIOV(bp)) in bnx2x_iov_eq_sp_event()
1778 if (!bnx2x_iov_is_vf_cid(bp, cid)) { in bnx2x_iov_eq_sp_event()
1790 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_iov_eq_sp_event()
1802 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, in bnx2x_iov_eq_sp_event()
1810 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); in bnx2x_iov_eq_sp_event()
1815 bnx2x_vf_handle_mcast_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1820 bnx2x_vf_handle_filters_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1825 bnx2x_vf_handle_rss_update_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1835 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) in bnx2x_vf_by_cid() argument
1842 return bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_cid()
1845 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, in bnx2x_iov_set_queue_sp_obj() argument
1850 if (!IS_SRIOV(bp)) in bnx2x_iov_set_queue_sp_obj()
1853 vf = bnx2x_vf_by_cid(bp, vf_cid); in bnx2x_iov_set_queue_sp_obj()
1867 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) in bnx2x_iov_adjust_stats_req() argument
1876 if (!IS_SRIOV(bp)) in bnx2x_iov_adjust_stats_req()
1879 if (!NO_FCOE(bp)) in bnx2x_iov_adjust_stats_req()
1883 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; in bnx2x_iov_adjust_stats_req()
1889 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, in bnx2x_iov_adjust_stats_req()
1892 cur_data_offset = bp->fw_stats_data_mapping + in bnx2x_iov_adjust_stats_req()
1896 cur_query_entry = &bp->fw_stats_req-> in bnx2x_iov_adjust_stats_req()
1899 for_each_vf(bp, i) { in bnx2x_iov_adjust_stats_req()
1901 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_iov_adjust_stats_req()
1918 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == in bnx2x_iov_adjust_stats_req()
1945 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; in bnx2x_iov_adjust_stats_req()
1949 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, in bnx2x_vf_qtbl_set_q() argument
1955 REG_WR(bp, reg, val); in bnx2x_vf_qtbl_set_q()
1958 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_clr_qtbl() argument
1963 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_clr_qtbl()
1967 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_disable() argument
1972 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_disable()
1973 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_disable()
1976 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_disable()
1977 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_disable()
1980 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_max_queue_cnt() argument
1987 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_chk_avail_resc() argument
1990 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
1991 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
2001 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_acquire() argument
2004 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * in bnx2x_vf_acquire()
2008 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_vf_acquire()
2021 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2040 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2049 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2050 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2080 bnx2x_vfq_init(bp, vf, q); in bnx2x_vf_acquire()
2086 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) in bnx2x_vf_init() argument
2095 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, in bnx2x_vf_init()
2109 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) in bnx2x_vf_init()
2113 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); in bnx2x_vf_init()
2116 func_init.pf_id = BP_FUNC(bp); in bnx2x_vf_init()
2118 bnx2x_func_init(bp, &func_init); in bnx2x_vf_init()
2121 bnx2x_vf_enable_access(bp, vf->abs_vfid); in bnx2x_vf_init()
2122 bnx2x_vf_enable_traffic(bp, vf); in bnx2x_vf_init()
2126 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_init()
2132 bnx2x_post_vf_bulletin(bp, vf->index); in bnx2x_vf_init()
2149 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_close() argument
2157 rc = bnx2x_vf_queue_teardown(bp, vf, i); in bnx2x_vf_close()
2164 bnx2x_vf_igu_disable(bp, vf); in bnx2x_vf_close()
2168 bnx2x_vf_clr_qtbl(bp, vf); in bnx2x_vf_close()
2179 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); in bnx2x_vf_close()
2196 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free() argument
2209 rc = bnx2x_vf_close(bp, vf); in bnx2x_vf_free()
2215 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_free()
2229 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rss_update() argument
2234 return bnx2x_config_rss(bp, rss); in bnx2x_vf_rss_update()
2237 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_tpa_update() argument
2260 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_tpa_update()
2276 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_release() argument
2281 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2283 rc = bnx2x_vf_free(bp, vf); in bnx2x_vf_release()
2288 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2292 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_lock_vf_pf_channel() argument
2312 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_unlock_vf_pf_channel() argument
2343 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) in bnx2x_set_pf_tx_switching() argument
2350 prev_flags = bp->flags; in bnx2x_set_pf_tx_switching()
2352 bp->flags |= TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2354 bp->flags &= ~TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2355 if (prev_flags == bp->flags) in bnx2x_set_pf_tx_switching()
2359 if ((bp->state != BNX2X_STATE_OPEN) || in bnx2x_set_pf_tx_switching()
2360 (bnx2x_get_q_logical_state(bp, in bnx2x_set_pf_tx_switching()
2361 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != in bnx2x_set_pf_tx_switching()
2379 for_each_eth_queue(bp, i) { in bnx2x_set_pf_tx_switching()
2380 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_pf_tx_switching()
2383 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; in bnx2x_set_pf_tx_switching()
2386 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_pf_tx_switching()
2399 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); in bnx2x_sriov_configure() local
2401 if (!IS_SRIOV(bp)) { in bnx2x_sriov_configure()
2407 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2410 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_sriov_configure()
2416 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { in bnx2x_sriov_configure()
2418 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2419 num_vfs_param = BNX2X_NR_VIRTFN(bp); in bnx2x_sriov_configure()
2422 bp->requested_nr_virtfn = num_vfs_param; in bnx2x_sriov_configure()
2424 bnx2x_set_pf_tx_switching(bp, false); in bnx2x_sriov_configure()
2425 bnx2x_disable_sriov(bp); in bnx2x_sriov_configure()
2428 return bnx2x_enable_sriov(bp); in bnx2x_sriov_configure()
2434 int bnx2x_enable_sriov(struct bnx2x *bp) in bnx2x_enable_sriov() argument
2436 int rc = 0, req_vfs = bp->requested_nr_virtfn; in bnx2x_enable_sriov()
2444 first_vf = bp->vfdb->sriov.first_vf_in_pf; in bnx2x_enable_sriov()
2448 BP_VFDB(bp)->vf_sbs_pool / req_vfs); in bnx2x_enable_sriov()
2452 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2455 vf_sb_count(BP_VF(bp, vf_idx)) = 0; in bnx2x_enable_sriov()
2457 bp->vfdb->vf_sbs_pool = 0; in bnx2x_enable_sriov()
2460 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; in bnx2x_enable_sriov()
2469 REG_WR(bp, address, igu_entry); in bnx2x_enable_sriov()
2476 bnx2x_get_vf_igu_cam_info(bp); in bnx2x_enable_sriov()
2479 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); in bnx2x_enable_sriov()
2482 for_each_vf(bp, vf_idx) { in bnx2x_enable_sriov()
2483 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2486 vf->vfqs = &bp->vfdb->vfqs[qcount]; in bnx2x_enable_sriov()
2488 bnx2x_iov_static_resc(bp, vf); in bnx2x_enable_sriov()
2496 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); in bnx2x_enable_sriov()
2497 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, in bnx2x_enable_sriov()
2502 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_enable_sriov()
2508 bnx2x_disable_sriov(bp); in bnx2x_enable_sriov()
2510 rc = bnx2x_set_pf_tx_switching(bp, true); in bnx2x_enable_sriov()
2514 rc = pci_enable_sriov(bp->pdev, req_vfs); in bnx2x_enable_sriov()
2523 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) in bnx2x_pf_set_vfs_vlan() argument
2529 for_each_vf(bp, vfidx) { in bnx2x_pf_set_vfs_vlan()
2530 bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_pf_set_vfs_vlan()
2532 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); in bnx2x_pf_set_vfs_vlan()
2536 void bnx2x_disable_sriov(struct bnx2x *bp) in bnx2x_disable_sriov() argument
2538 if (pci_vfs_assigned(bp->pdev)) { in bnx2x_disable_sriov()
2544 pci_disable_sriov(bp->pdev); in bnx2x_disable_sriov()
2547 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, in bnx2x_vf_op_prep() argument
2552 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_vf_op_prep()
2557 if (!IS_SRIOV(bp)) { in bnx2x_vf_op_prep()
2562 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { in bnx2x_vf_op_prep()
2564 vfidx, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_op_prep()
2569 *vf = BP_VF(bp, vfidx); in bnx2x_vf_op_prep()
2570 *bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_vf_op_prep()
2595 struct bnx2x *bp = netdev_priv(dev); in bnx2x_get_vf_config() local
2603 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_get_vf_config()
2621 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_get_vf_config()
2622 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, in bnx2x_get_vf_config()
2624 vlan_obj->get_n_elements(bp, vlan_obj, 1, in bnx2x_get_vf_config()
2629 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2646 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2671 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_mac() local
2682 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_mac()
2686 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2695 rc = bnx2x_post_vf_bulletin(bp, vfidx); in bnx2x_set_vf_mac()
2698 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2706 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); in bnx2x_set_vf_mac()
2714 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_mac()
2718 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
2722 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); in bnx2x_set_vf_mac()
2730 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); in bnx2x_set_vf_mac()
2739 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, in bnx2x_set_vf_mac()
2743 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
2749 static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp, in bnx2x_set_vf_vlan_acceptance() argument
2762 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, in bnx2x_set_vf_vlan_acceptance()
2765 bnx2x_config_rx_mode(bp, &rx_ramrod); in bnx2x_set_vf_vlan_acceptance()
2768 static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_set_vf_vlan_filter() argument
2783 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); in bnx2x_set_vf_vlan_filter()
2795 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_vlan() local
2811 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_vlan()
2821 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2830 rc = bnx2x_post_vf_bulletin(bp, vfidx); in bnx2x_set_vf_vlan()
2833 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2837 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != in bnx2x_set_vf_vlan()
2842 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_vlan()
2846 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2851 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, in bnx2x_set_vf_vlan()
2863 bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan); in bnx2x_set_vf_vlan()
2865 rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true); in bnx2x_set_vf_vlan()
2879 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != in bnx2x_set_vf_vlan()
2914 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_vf_vlan()
2922 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2946 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) in bnx2x_sample_bulletin() argument
2958 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, in bnx2x_sample_bulletin()
2961 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); in bnx2x_sample_bulletin()
2963 if (bp->shadow_bulletin.content.crc == crc) in bnx2x_sample_bulletin()
2967 bp->shadow_bulletin.content.crc, crc); in bnx2x_sample_bulletin()
2975 bulletin = &bp->shadow_bulletin.content; in bnx2x_sample_bulletin()
2978 if (bp->old_bulletin.version == bulletin->version) in bnx2x_sample_bulletin()
2983 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { in bnx2x_sample_bulletin()
2985 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); in bnx2x_sample_bulletin()
2992 bp->vf_link_vars.line_speed = bulletin->link_speed; in bnx2x_sample_bulletin()
2993 bp->vf_link_vars.link_report_flags = 0; in bnx2x_sample_bulletin()
2997 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3001 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3005 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3009 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3010 __bnx2x_link_report(bp); in bnx2x_sample_bulletin()
3014 memcpy(&bp->old_bulletin, bulletin, in bnx2x_sample_bulletin()
3020 void bnx2x_timer_sriov(struct bnx2x *bp) in bnx2x_timer_sriov() argument
3022 bnx2x_sample_bulletin(bp); in bnx2x_timer_sriov()
3025 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) in bnx2x_timer_sriov()
3026 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, in bnx2x_timer_sriov()
3030 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) in bnx2x_vf_doorbells() argument
3033 return bp->regview + PXP_VF_ADDR_DB_START; in bnx2x_vf_doorbells()
3036 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) in bnx2x_vf_pci_dealloc() argument
3038 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_dealloc()
3040 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_dealloc()
3044 int bnx2x_vf_pci_alloc(struct bnx2x *bp) in bnx2x_vf_pci_alloc() argument
3046 mutex_init(&bp->vf2pf_mutex); in bnx2x_vf_pci_alloc()
3049 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_alloc()
3051 if (!bp->vf2pf_mbox) in bnx2x_vf_pci_alloc()
3055 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_alloc()
3057 if (!bp->pf2vf_bulletin) in bnx2x_vf_pci_alloc()
3060 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); in bnx2x_vf_pci_alloc()
3065 bnx2x_vf_pci_dealloc(bp); in bnx2x_vf_pci_alloc()
3069 void bnx2x_iov_channel_down(struct bnx2x *bp) in bnx2x_iov_channel_down() argument
3074 if (!IS_SRIOV(bp)) in bnx2x_iov_channel_down()
3077 for_each_vf(bp, vf_idx) { in bnx2x_iov_channel_down()
3081 bulletin = BP_VF_BULLETIN(bp, vf_idx); in bnx2x_iov_channel_down()
3085 bnx2x_post_vf_bulletin(bp, vf_idx); in bnx2x_iov_channel_down()
3091 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); in bnx2x_iov_task() local
3093 if (!netif_running(bp->dev)) in bnx2x_iov_task()
3097 &bp->iov_task_state)) in bnx2x_iov_task()
3098 bnx2x_vf_handle_flr_event(bp); in bnx2x_iov_task()
3101 &bp->iov_task_state)) in bnx2x_iov_task()
3102 bnx2x_vf_mbx(bp); in bnx2x_iov_task()
3105 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) in bnx2x_schedule_iov_task() argument
3108 set_bit(flag, &bp->iov_task_state); in bnx2x_schedule_iov_task()
3111 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); in bnx2x_schedule_iov_task()