Lines matching refs: bp (references to the bp device pointer in drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c)

27 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
33 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, in storm_memset_vf_to_pf() argument
36 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
38 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
40 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
42 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
46 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, in storm_memset_func_en() argument
49 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
51 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
53 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
55 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
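
The two helpers above mirror a single per-function byte into each of the four STORM scratchpads (X, C, T, U). A minimal standalone sketch of that pattern, with a hypothetical write8() standing in for REG_WR8() and caller-supplied base addresses standing in for the BAR_*STRORM_INTMEM + *_OFFSET(abs_fid) pairs:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical register accessor; the driver uses REG_WR8(bp, ...) */
    static void write8(uint32_t addr, uint8_t val)
    {
            printf("wr8 0x%08x <- 0x%02x\n", addr, val);
    }

    /* mirror one per-function byte into all four storm memories */
    static void storm_memset_u8(const uint32_t base[4], uint32_t fid_off,
                                uint8_t val)
    {
            int i;

            for (i = 0; i < 4; i++)         /* X, C, T, U storms */
                    write8(base[i] + fid_off, val);
    }
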
59 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_idx_by_abs_fid() argument
63 for_each_vf(bp, idx) in bnx2x_vf_idx_by_abs_fid()
64 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) in bnx2x_vf_idx_by_abs_fid()
70 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_by_abs_fid() argument
72 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_abs_fid()
73 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; in bnx2x_vf_by_abs_fid()
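
bnx2x_vf_idx_by_abs_fid() is a linear scan of the VF database, and bnx2x_vf_by_abs_fid() bounds-checks the resulting index before handing out a pointer. A self-contained sketch, with struct virtf, vf_db and nr_virtfn as stand-ins for the driver's BP_VF()/BNX2X_NR_VIRTFN() accessors:

    #include <stddef.h>
    #include <stdint.h>

    struct virtf { uint16_t abs_vfid; /* ... */ };

    /* linear scan; returns nr_virtfn when no VF matches */
    static int vf_idx_by_abs_fid(const struct virtf *vf_db, int nr_virtfn,
                                 uint16_t abs_vfid)
    {
            int idx;

            for (idx = 0; idx < nr_virtfn; idx++)
                    if (vf_db[idx].abs_vfid == abs_vfid)
                            break;
            return idx;
    }

    /* bounds-check before dereferencing, as the driver does */
    static const struct virtf *vf_by_abs_fid(const struct virtf *vf_db,
                                             int nr_virtfn, uint16_t abs_vfid)
    {
            int idx = vf_idx_by_abs_fid(vf_db, nr_virtfn, abs_vfid);

            return idx < nr_virtfn ? &vf_db[idx] : NULL;
    }
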
76 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_igu_ack_sb() argument
100 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); in bnx2x_vf_igu_ack_sb()
106 REG_WR(bp, igu_addr_ctl, ctl); in bnx2x_vf_igu_ack_sb()
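
bnx2x_vf_igu_ack_sb() acks a status block in two register writes: a composed data word, then a control word that fires the command. The sketch below shows only that two-phase shape; the addresses and field layout are illustrative, not the device's:

    #include <stdint.h>
    #include <stdio.h>

    static void reg_wr(uint32_t addr, uint32_t val)   /* stands in for REG_WR */
    {
            printf("wr 0x%08x <- 0x%08x\n", addr, val);
    }

    static void igu_ack(uint32_t data_addr, uint32_t ctl_addr,
                        uint32_t sb_id_and_flags, uint32_t ctl)
    {
            reg_wr(data_addr, sb_id_and_flags);   /* stage the command data */
            reg_wr(ctl_addr, ctl);                /* then trigger it */
    }
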
111 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, in bnx2x_validate_vf_sp_objs() argument
126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_tx() argument
142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_rx() argument
166 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, in bnx2x_vfop_qctor_prep() argument
239 static int bnx2x_vf_queue_create(struct bnx2x *bp, in bnx2x_vf_queue_create() argument
253 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == in bnx2x_vf_queue_create()
261 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
268 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
273 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), in bnx2x_vf_queue_create()
279 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_destroy() argument
295 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == in bnx2x_vf_queue_destroy()
304 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_vf_queue_destroy()
321 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) in bnx2x_vf_set_igu_info() argument
323 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_set_igu_info()
326 if (!BP_VFDB(bp)->first_vf_igu_entry) in bnx2x_vf_set_igu_info()
327 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; in bnx2x_vf_set_igu_info()
336 BP_VFDB(bp)->vf_sbs_pool++; in bnx2x_vf_set_igu_info()
339 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, in bnx2x_vf_vlan_credit() argument
347 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); in bnx2x_vf_vlan_credit()
355 bnx2x_vlan_mac_h_read_unlock(bp, obj); in bnx2x_vf_vlan_credit()
360 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_vlan_mac_clear() argument
388 rc = ramrod.vlan_mac_obj->delete_all(bp, in bnx2x_vf_vlan_mac_clear()
405 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, in bnx2x_vf_mac_vlan_config() argument
449 rc = bnx2x_config_vlan_mac(bp, &ramrod); in bnx2x_vf_mac_vlan_config()
460 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, in bnx2x_vf_mac_vlan_config()
466 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mac_vlan_config_list() argument
474 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_vf_mac_vlan_config_list()
479 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
491 bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
503 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, in bnx2x_vf_queue_setup() argument
510 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); in bnx2x_vf_queue_setup()
522 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); in bnx2x_vf_queue_setup()
529 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, in bnx2x_vf_queue_setup()
537 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_flr() argument
546 bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_vf_queue_flr()
547 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); in bnx2x_vf_queue_flr()
550 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); in bnx2x_vf_queue_flr()
564 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_queue_flr()
575 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mcast() argument
603 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); in bnx2x_vf_mcast()
621 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); in bnx2x_vf_mcast()
630 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, in bnx2x_vf_prep_rx_mode() argument
640 ramrod->rx_mode_obj = &bp->rx_mode_obj; in bnx2x_vf_prep_rx_mode()
651 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
652 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
655 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rxmode() argument
662 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); in bnx2x_vf_rxmode()
665 return bnx2x_config_rx_mode(bp, &ramrod); in bnx2x_vf_rxmode()
668 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) in bnx2x_vf_queue_teardown() argument
676 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); in bnx2x_vf_queue_teardown()
681 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { in bnx2x_vf_queue_teardown()
682 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
686 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
690 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); in bnx2x_vf_queue_teardown()
697 rc = bnx2x_vf_queue_destroy(bp, vf, qid); in bnx2x_vf_queue_teardown()
715 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) in bnx2x_vf_enable_internal() argument
717 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); in bnx2x_vf_enable_internal()
721 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_semi_clear_err() argument
723 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
724 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
725 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
726 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
729 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_pglue_clear_err() argument
731 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; in bnx2x_vf_pglue_clear_err()
748 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); in bnx2x_vf_pglue_clear_err()
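
The WAS_ERR bookkeeping visible above packs VFs 32 to a register: the group index is (2 * BP_PATH(bp) + abs_vfid) >> 5 and the VF's bit inside its group is abs_vfid & 0x1f. A runnable sketch of just that arithmetic (example values, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int path = 0;           /* BP_PATH(bp), 0 or 1 */
            unsigned int abs_vfid = 37;      /* example VF */
            unsigned int group = (2 * path + abs_vfid) >> 5;
            uint32_t bit = 1u << (abs_vfid & 0x1f);

            printf("group %u, mask 0x%08x\n", group, bit);
            return 0;
    }
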
751 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_reset() argument
757 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_reset()
759 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); in bnx2x_vf_igu_reset()
760 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); in bnx2x_vf_igu_reset()
761 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); in bnx2x_vf_igu_reset()
762 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); in bnx2x_vf_igu_reset()
763 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); in bnx2x_vf_igu_reset()
764 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); in bnx2x_vf_igu_reset()
766 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_reset()
771 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; in bnx2x_vf_igu_reset()
772 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_reset()
778 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_reset()
785 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); in bnx2x_vf_igu_reset()
788 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, in bnx2x_vf_igu_reset()
792 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, in bnx2x_vf_igu_reset()
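
bnx2x_vf_igu_reset() shows the pretend-function bracket used throughout this file: take on the VF's hardware function id via bnx2x_pretend_func(HW_VF_HANDLE(...)), touch the registers as seen by that VF, then unconditionally restore BP_ABS_FUNC(bp). A sketch of the bracket, with pretend() as a stand-in:

    #include <stdio.h>

    static void pretend(unsigned int hw_func_id)   /* bnx2x_pretend_func() */
    {
            printf("now pretending as function %u\n", hw_func_id);
    }

    /* run body() while wearing the VF's identity; always restore the PF's */
    static void with_vf_identity(unsigned int vf_hw_id, unsigned int pf_abs_id,
                                 void (*body)(void))
    {
            pretend(vf_hw_id);      /* HW_VF_HANDLE(bp, abs_vfid) */
            body();                 /* e.g. the IGU writes listed above */
            pretend(pf_abs_id);     /* BP_ABS_FUNC(bp) */
    }
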
797 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_enable_access() argument
800 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); in bnx2x_vf_enable_access()
801 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); in bnx2x_vf_enable_access()
804 bnx2x_vf_semi_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
805 bnx2x_vf_pglue_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
808 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); in bnx2x_vf_enable_access()
810 bnx2x_vf_enable_internal(bp, true); in bnx2x_vf_enable_access()
811 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_access()
814 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_enable_traffic() argument
817 bnx2x_vf_igu_reset(bp, vf); in bnx2x_vf_enable_traffic()
820 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_enable_traffic()
821 REG_WR(bp, PBF_REG_DISABLE_VF, 0); in bnx2x_vf_enable_traffic()
822 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_traffic()
825 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_is_pcie_pending() argument
828 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_is_pcie_pending()
839 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_flr_clnup_epilog() argument
842 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) in bnx2x_vf_flr_clnup_epilog()
848 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, in bnx2x_iov_re_set_vlan_filters() argument
860 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); in bnx2x_iov_re_set_vlan_filters()
862 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); in bnx2x_iov_re_set_vlan_filters()
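
bnx2x_iov_re_set_vlan_filters() adjusts a shared credit pool by the delta against the VF's current allowance: get(diff) when growing, put(-diff) when shrinking, exactly as the two lines above show. A self-contained sketch of that bookkeeping, with struct pool standing in for bp->vlans_pool:

    struct pool { int avail; };

    static int pool_get(struct pool *p, int n)
    {
            if (p->avail < n)
                    return -1;      /* not enough credit left */
            p->avail -= n;
            return 0;
    }

    static int pool_put(struct pool *p, int n)
    {
            p->avail += n;
            return 0;
    }

    /* resize one VF's quota against the shared pool */
    static int re_set_filters(struct pool *p, int *vf_quota, int want)
    {
            int diff = want - *vf_quota;
            int rc = diff > 0 ? pool_get(p, diff) : pool_put(p, -diff);

            if (!rc)
                    *vf_quota = want;
            return rc;
    }
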
875 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_iov_static_resc() argument
888 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); in bnx2x_iov_static_resc()
889 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); in bnx2x_iov_static_resc()
891 bnx2x_iov_re_set_vlan_filters(bp, vf, in bnx2x_iov_static_resc()
892 vlan_count / BNX2X_NR_VIRTFN(bp)); in bnx2x_iov_static_resc()
902 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free_resc() argument
905 bnx2x_iov_static_resc(bp, vf); in bnx2x_vf_free_resc()
909 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr_clnup_hw() argument
911 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); in bnx2x_vf_flr_clnup_hw()
914 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_flr_clnup_hw()
915 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, in bnx2x_vf_flr_clnup_hw()
918 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_flr_clnup_hw()
921 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), in bnx2x_vf_flr_clnup_hw()
926 bnx2x_tx_hw_flushed(bp, poll_cnt); in bnx2x_vf_flr_clnup_hw()
929 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr() argument
939 rc = bnx2x_vf_queue_flr(bp, vf, i); in bnx2x_vf_flr()
945 bnx2x_vf_mcast(bp, vf, NULL, 0, true); in bnx2x_vf_flr()
948 bnx2x_vf_flr_clnup_hw(bp, vf); in bnx2x_vf_flr()
951 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_flr()
954 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_vf_flr()
961 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) in bnx2x_vf_flr_clnup() argument
966 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { in bnx2x_vf_flr_clnup()
968 if (bnx2x_vf(bp, i, state) != VF_RESET || in bnx2x_vf_flr_clnup()
969 !bnx2x_vf(bp, i, flr_clnup_stage)) in bnx2x_vf_flr_clnup()
973 i, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_flr_clnup()
975 vf = BP_VF(bp, i); in bnx2x_vf_flr_clnup()
978 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
981 bnx2x_vf_flr(bp, vf); in bnx2x_vf_flr_clnup()
985 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
996 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_flr_clnup()
998 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], in bnx2x_vf_flr_clnup()
999 bp->vfdb->flrd_vfs[i]); in bnx2x_vf_flr_clnup()
1001 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); in bnx2x_vf_flr_clnup()
1007 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); in bnx2x_vf_flr_clnup()
1010 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) in bnx2x_vf_handle_flr_event() argument
1016 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); in bnx2x_vf_handle_flr_event()
1020 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_handle_flr_event()
1022 for_each_vf(bp, i) { in bnx2x_vf_handle_flr_event()
1023 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_vf_handle_flr_event()
1027 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); in bnx2x_vf_handle_flr_event()
1029 reset = bp->vfdb->flrd_vfs[1] & in bnx2x_vf_handle_flr_event()
1044 bnx2x_vf_flr_clnup(bp); in bnx2x_vf_handle_flr_event()
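
bnx2x_vf_handle_flr_event() snapshots the MCP's disabled-VF bitmap into flrd_vfs[] and tests one bit per VF: word 0 for VFs 0..31, word 1 for the rest. A sketch of that test; the "- 32" for the high word is an assumption, since the second operand is truncated in the listing above:

    #include <stdbool.h>
    #include <stdint.h>

    static bool vf_was_flred(const uint32_t flrd_vfs[2], unsigned int abs_vfid)
    {
            if (abs_vfid < 32)
                    return flrd_vfs[0] & (1u << abs_vfid);
            return flrd_vfs[1] & (1u << (abs_vfid - 32));   /* assumed */
    }
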
1048 void bnx2x_iov_init_dq(struct bnx2x *bp) in bnx2x_iov_init_dq() argument
1050 if (!IS_SRIOV(bp)) in bnx2x_iov_init_dq()
1054 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); in bnx2x_iov_init_dq()
1055 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); in bnx2x_iov_init_dq()
1060 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); in bnx2x_iov_init_dq()
1063 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); in bnx2x_iov_init_dq()
1068 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); in bnx2x_iov_init_dq()
1074 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); in bnx2x_iov_init_dq()
1075 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); in bnx2x_iov_init_dq()
1076 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); in bnx2x_iov_init_dq()
1077 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); in bnx2x_iov_init_dq()
1082 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); in bnx2x_iov_init_dq()
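
bnx2x_iov_init_dq() programs the doorbell queue so that each VF owns a contiguous window of connection ids starting at BNX2X_FIRST_VF_CID, sized by BNX2X_VF_CID_WND. A sketch of the window arithmetic only; the concrete values on the right are hypothetical examples, not the driver's constants:

    #include <stdio.h>

    int main(void)
    {
            unsigned int first_vf_cid = 256;   /* BNX2X_FIRST_VF_CID (example) */
            unsigned int cid_wnd_bits = 4;     /* BNX2X_VF_CID_WND  (example) */
            unsigned int vf = 3;
            unsigned int base = first_vf_cid + (vf << cid_wnd_bits);

            printf("VF%u CIDs: %u..%u\n", vf, base,
                   base + (1u << cid_wnd_bits) - 1);
            return 0;
    }
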
1085 void bnx2x_iov_init_dmae(struct bnx2x *bp) in bnx2x_iov_init_dmae() argument
1087 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) in bnx2x_iov_init_dmae()
1088 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); in bnx2x_iov_init_dmae()
1091 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) in bnx2x_vf_bus() argument
1093 struct pci_dev *dev = bp->pdev; in bnx2x_vf_bus()
1094 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_bus()
1100 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) in bnx2x_vf_devfn() argument
1102 struct pci_dev *dev = bp->pdev; in bnx2x_vf_devfn()
1103 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_devfn()
1108 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_set_bars() argument
1111 struct pci_dev *dev = bp->pdev; in bnx2x_vf_set_bars()
1112 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_set_bars()
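
bnx2x_vf_bus()/bnx2x_vf_devfn() are applications of the standard PCIe SR-IOV routing-ID rule: the VF's routing id derives from the PF's, plus the capability's First VF Offset and VF Stride. A runnable sketch of that arithmetic under example values (the rule is from the SR-IOV spec, not quoted from this file):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pf_bus = 4, pf_devfn = 0;  /* example PF at 04:00.0 */
            unsigned int offset = 16, stride = 1;   /* from SR-IOV capability */
            unsigned int vfid = 5;
            unsigned int rid = (pf_bus << 8) + pf_devfn + offset + stride * vfid;

            printf("VF%u at %02x:%02x.%x\n", vfid,
                   rid >> 8, (rid >> 3) & 0x1f, rid & 7);
            return 0;
    }
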
1130 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) in bnx2x_get_vf_igu_cam_info() argument
1138 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); in bnx2x_get_vf_igu_cam_info()
1144 else if (current_pf == BP_FUNC(bp)) in bnx2x_get_vf_igu_cam_info()
1145 bnx2x_vf_set_igu_info(bp, sb_id, in bnx2x_get_vf_igu_cam_info()
1153 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); in bnx2x_get_vf_igu_cam_info()
1154 return BP_VFDB(bp)->vf_sbs_pool; in bnx2x_get_vf_igu_cam_info()
1157 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) in __bnx2x_iov_free_vfdb() argument
1159 if (bp->vfdb) { in __bnx2x_iov_free_vfdb()
1160 kfree(bp->vfdb->vfqs); in __bnx2x_iov_free_vfdb()
1161 kfree(bp->vfdb->vfs); in __bnx2x_iov_free_vfdb()
1162 kfree(bp->vfdb); in __bnx2x_iov_free_vfdb()
1164 bp->vfdb = NULL; in __bnx2x_iov_free_vfdb()
1167 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_pci_cfg_info() argument
1170 struct pci_dev *dev = bp->pdev; in bnx2x_sriov_pci_cfg_info()
1192 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_info() argument
1200 if (bnx2x_sriov_pci_cfg_info(bp, iov)) in bnx2x_sriov_info()
1207 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); in bnx2x_sriov_info()
1209 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); in bnx2x_sriov_info()
1213 BP_FUNC(bp), in bnx2x_sriov_info()
1221 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, in bnx2x_iov_init_one() argument
1226 struct pci_dev *dev = bp->pdev; in bnx2x_iov_init_one()
1228 bp->vfdb = NULL; in bnx2x_iov_init_one()
1231 if (IS_VF(bp)) in bnx2x_iov_init_one()
1239 if (CHIP_IS_E1x(bp)) in bnx2x_iov_init_one()
1247 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { in bnx2x_iov_init_one()
1249 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); in bnx2x_iov_init_one()
1262 if (!bnx2x_ari_enabled(bp->pdev)) { in bnx2x_iov_init_one()
1268 if (CHIP_INT_MODE_IS_BC(bp)) { in bnx2x_iov_init_one()
1274 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); in bnx2x_iov_init_one()
1275 if (!bp->vfdb) { in bnx2x_iov_init_one()
1286 iov = &(bp->vfdb->sriov); in bnx2x_iov_init_one()
1287 err = bnx2x_sriov_info(bp, iov); in bnx2x_iov_init_one()
1301 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * in bnx2x_iov_init_one()
1302 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); in bnx2x_iov_init_one()
1303 if (!bp->vfdb->vfs) { in bnx2x_iov_init_one()
1310 for_each_vf(bp, i) { in bnx2x_iov_init_one()
1311 bnx2x_vf(bp, i, index) = i; in bnx2x_iov_init_one()
1312 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; in bnx2x_iov_init_one()
1313 bnx2x_vf(bp, i, state) = VF_FREE; in bnx2x_iov_init_one()
1314 mutex_init(&bnx2x_vf(bp, i, op_mutex)); in bnx2x_iov_init_one()
1315 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; in bnx2x_iov_init_one()
1319 if (!bnx2x_get_vf_igu_cam_info(bp)) { in bnx2x_iov_init_one()
1326 bp->vfdb->vfqs = kzalloc( in bnx2x_iov_init_one()
1330 if (!bp->vfdb->vfqs) { in bnx2x_iov_init_one()
1337 mutex_init(&bp->vfdb->event_mutex); in bnx2x_iov_init_one()
1339 mutex_init(&bp->vfdb->bulletin_mutex); in bnx2x_iov_init_one()
1344 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_init_one()
1348 void bnx2x_iov_remove_one(struct bnx2x *bp) in bnx2x_iov_remove_one() argument
1353 if (!IS_SRIOV(bp)) in bnx2x_iov_remove_one()
1356 bnx2x_disable_sriov(bp); in bnx2x_iov_remove_one()
1359 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { in bnx2x_iov_remove_one()
1360 bnx2x_pretend_func(bp, in bnx2x_iov_remove_one()
1361 HW_VF_HANDLE(bp, in bnx2x_iov_remove_one()
1362 bp->vfdb->sriov.first_vf_in_pf + in bnx2x_iov_remove_one()
1365 bp->vfdb->sriov.first_vf_in_pf + vf_idx); in bnx2x_iov_remove_one()
1366 bnx2x_vf_enable_internal(bp, 0); in bnx2x_iov_remove_one()
1367 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_iov_remove_one()
1371 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_remove_one()
1374 void bnx2x_iov_free_mem(struct bnx2x *bp) in bnx2x_iov_free_mem() argument
1378 if (!IS_SRIOV(bp)) in bnx2x_iov_free_mem()
1383 struct hw_dma *cxt = &bp->vfdb->context[i]; in bnx2x_iov_free_mem()
1387 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, in bnx2x_iov_free_mem()
1388 BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_free_mem()
1389 BP_VFDB(bp)->sp_dma.size); in bnx2x_iov_free_mem()
1391 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, in bnx2x_iov_free_mem()
1392 BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1393 BP_VF_MBX_DMA(bp)->size); in bnx2x_iov_free_mem()
1395 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, in bnx2x_iov_free_mem()
1396 BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1397 BP_VF_BULLETIN_DMA(bp)->size); in bnx2x_iov_free_mem()
1400 int bnx2x_iov_alloc_mem(struct bnx2x *bp) in bnx2x_iov_alloc_mem() argument
1405 if (!IS_SRIOV(bp)) in bnx2x_iov_alloc_mem()
1409 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * in bnx2x_iov_alloc_mem()
1413 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_alloc_mem()
1428 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); in bnx2x_iov_alloc_mem()
1429 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_alloc_mem()
1431 if (!BP_VFDB(bp)->sp_dma.addr) in bnx2x_iov_alloc_mem()
1433 BP_VFDB(bp)->sp_dma.size = tot_size; in bnx2x_iov_alloc_mem()
1436 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; in bnx2x_iov_alloc_mem()
1437 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1439 if (!BP_VF_MBX_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1442 BP_VF_MBX_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
1445 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; in bnx2x_iov_alloc_mem()
1446 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1448 if (!BP_VF_BULLETIN_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1451 BP_VF_BULLETIN_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
1459 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfq_init() argument
1470 bnx2x_init_queue_obj(bp, &q->sp_obj, in bnx2x_vfq_init()
1472 bnx2x_vf_sp(bp, vf, q_data), in bnx2x_vfq_init()
1473 bnx2x_vf_sp_map(bp, vf, q_data), in bnx2x_vfq_init()
1484 static int bnx2x_max_speed_cap(struct bnx2x *bp) in bnx2x_max_speed_cap() argument
1486 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; in bnx2x_max_speed_cap()
1495 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) in bnx2x_iov_link_update_vf() argument
1497 struct bnx2x_link_report_data *state = &bp->last_reported_link; in bnx2x_iov_link_update_vf()
1504 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); in bnx2x_iov_link_update_vf()
1508 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1534 bulletin->link_speed = bnx2x_max_speed_cap(bp); in bnx2x_iov_link_update_vf()
1546 rc = bnx2x_post_vf_bulletin(bp, idx); in bnx2x_iov_link_update_vf()
1554 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1560 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_link_state() local
1561 struct bnx2x_virtf *vf = BP_VF(bp, idx); in bnx2x_set_vf_link_state()
1571 return bnx2x_iov_link_update_vf(bp, idx); in bnx2x_set_vf_link_state()
1574 void bnx2x_iov_link_update(struct bnx2x *bp) in bnx2x_iov_link_update() argument
1578 if (!IS_SRIOV(bp)) in bnx2x_iov_link_update()
1581 for_each_vf(bp, vfid) in bnx2x_iov_link_update()
1582 bnx2x_iov_link_update_vf(bp, vfid); in bnx2x_iov_link_update()
1586 int bnx2x_iov_nic_init(struct bnx2x *bp) in bnx2x_iov_nic_init() argument
1590 if (!IS_SRIOV(bp)) { in bnx2x_iov_nic_init()
1595 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); in bnx2x_iov_nic_init()
1601 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1602 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1604 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * in bnx2x_iov_nic_init()
1608 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_iov_nic_init()
1617 bnx2x_iov_static_resc(bp, vf); in bnx2x_iov_nic_init()
1621 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_iov_nic_init()
1629 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, in bnx2x_iov_nic_init()
1631 bnx2x_vf_sp(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1632 bnx2x_vf_sp_map(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1638 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) in bnx2x_iov_nic_init()
1639 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * in bnx2x_iov_nic_init()
1642 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + in bnx2x_iov_nic_init()
1646 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_iov_nic_init()
1650 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1651 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1654 vf->bus = bnx2x_vf_bus(bp, vfid); in bnx2x_iov_nic_init()
1655 vf->devfn = bnx2x_vf_devfn(bp, vfid); in bnx2x_iov_nic_init()
1656 bnx2x_vf_set_bars(bp, vf); in bnx2x_iov_nic_init()
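
The mailbox setup inside bnx2x_iov_nic_init() (lines 1638-1642 above) slices one shared DMA block into per-VF slots: VF i's mailbox sits i * MBX_MSG_ALIGNED_SIZE bytes from the base, in both the CPU and bus address spaces. A sketch of that slicing:

    #include <stddef.h>
    #include <stdint.h>

    struct mbx_slot {
            void     *msg;          /* CPU view (BP_VF_MBX(bp, vfid)->msg) */
            uint64_t  msg_mapping;  /* bus view (->msg_mapping)            */
    };

    /* carve VF i's mailbox out of one shared DMA block */
    static void mbx_slice(void *cpu_base, uint64_t dma_base, size_t slot_size,
                          unsigned int vfid, struct mbx_slot *out)
    {
            out->msg         = (uint8_t *)cpu_base + vfid * slot_size;
            out->msg_mapping = dma_base + vfid * slot_size;
    }
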
1670 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) in bnx2x_iov_chip_cleanup() argument
1674 if (!IS_SRIOV(bp)) in bnx2x_iov_chip_cleanup()
1678 for_each_vf(bp, i) in bnx2x_iov_chip_cleanup()
1679 bnx2x_vf_release(bp, BP_VF(bp, i)); in bnx2x_iov_chip_cleanup()
1685 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) in bnx2x_iov_init_ilt() argument
1688 struct bnx2x_ilt *ilt = BP_ILT(bp); in bnx2x_iov_init_ilt()
1690 if (!IS_SRIOV(bp)) in bnx2x_iov_init_ilt()
1695 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_init_ilt()
1704 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) in bnx2x_iov_is_vf_cid() argument
1711 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, in bnx2x_vf_handle_classification_eqe() argument
1723 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, in bnx2x_vf_handle_classification_eqe()
1727 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, in bnx2x_vf_handle_classification_eqe()
1742 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, in bnx2x_vf_handle_mcast_eqe() argument
1753 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_vf_handle_mcast_eqe()
1761 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, in bnx2x_vf_handle_filters_eqe() argument
1769 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, in bnx2x_vf_handle_rss_update_eqe() argument
1775 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) in bnx2x_iov_eq_sp_event() argument
1782 if (!IS_SRIOV(bp)) in bnx2x_iov_eq_sp_event()
1820 if (!bnx2x_iov_is_vf_cid(bp, cid)) { in bnx2x_iov_eq_sp_event()
1832 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_iov_eq_sp_event()
1844 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, in bnx2x_iov_eq_sp_event()
1852 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); in bnx2x_iov_eq_sp_event()
1857 bnx2x_vf_handle_mcast_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1862 bnx2x_vf_handle_filters_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1867 bnx2x_vf_handle_rss_update_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1877 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) in bnx2x_vf_by_cid() argument
1884 return bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_cid()
1887 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, in bnx2x_iov_set_queue_sp_obj() argument
1892 if (!IS_SRIOV(bp)) in bnx2x_iov_set_queue_sp_obj()
1895 vf = bnx2x_vf_by_cid(bp, vf_cid); in bnx2x_iov_set_queue_sp_obj()
1909 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) in bnx2x_iov_adjust_stats_req() argument
1918 if (!IS_SRIOV(bp)) in bnx2x_iov_adjust_stats_req()
1921 if (!NO_FCOE(bp)) in bnx2x_iov_adjust_stats_req()
1925 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; in bnx2x_iov_adjust_stats_req()
1931 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, in bnx2x_iov_adjust_stats_req()
1934 cur_data_offset = bp->fw_stats_data_mapping + in bnx2x_iov_adjust_stats_req()
1938 cur_query_entry = &bp->fw_stats_req-> in bnx2x_iov_adjust_stats_req()
1941 for_each_vf(bp, i) { in bnx2x_iov_adjust_stats_req()
1943 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_iov_adjust_stats_req()
1960 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == in bnx2x_iov_adjust_stats_req()
1987 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; in bnx2x_iov_adjust_stats_req()
1991 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, in bnx2x_vf_qtbl_set_q() argument
1997 REG_WR(bp, reg, val); in bnx2x_vf_qtbl_set_q()
2000 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_clr_qtbl() argument
2005 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_clr_qtbl()
2009 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_disable() argument
2014 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_disable()
2015 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_disable()
2018 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_disable()
2019 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_disable()
2022 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_max_queue_cnt() argument
2029 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_chk_avail_resc() argument
2032 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
2033 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
2044 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_acquire() argument
2047 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * in bnx2x_vf_acquire()
2051 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_vf_acquire()
2064 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2083 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2092 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2093 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2097 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); in bnx2x_vf_acquire()
2127 bnx2x_vfq_init(bp, vf, q); in bnx2x_vf_acquire()
2133 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) in bnx2x_vf_init() argument
2143 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, in bnx2x_vf_init()
2157 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) in bnx2x_vf_init()
2161 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0); in bnx2x_vf_init()
2175 func_init.pf_id = BP_FUNC(bp); in bnx2x_vf_init()
2180 bnx2x_func_init(bp, &func_init); in bnx2x_vf_init()
2183 bnx2x_vf_enable_access(bp, vf->abs_vfid); in bnx2x_vf_init()
2184 bnx2x_vf_enable_traffic(bp, vf); in bnx2x_vf_init()
2188 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_init()
2194 bnx2x_post_vf_bulletin(bp, vf->index); in bnx2x_vf_init()
2211 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_close() argument
2219 rc = bnx2x_vf_queue_teardown(bp, vf, i); in bnx2x_vf_close()
2226 bnx2x_vf_igu_disable(bp, vf); in bnx2x_vf_close()
2230 bnx2x_vf_clr_qtbl(bp, vf); in bnx2x_vf_close()
2241 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); in bnx2x_vf_close()
2258 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free() argument
2271 rc = bnx2x_vf_close(bp, vf); in bnx2x_vf_free()
2277 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_free()
2291 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rss_update() argument
2296 return bnx2x_config_rss(bp, rss); in bnx2x_vf_rss_update()
2299 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_tpa_update() argument
2322 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_tpa_update()
2338 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_release() argument
2343 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2345 rc = bnx2x_vf_free(bp, vf); in bnx2x_vf_release()
2350 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2354 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_lock_vf_pf_channel() argument
2374 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_unlock_vf_pf_channel() argument
2405 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) in bnx2x_set_pf_tx_switching() argument
2412 prev_flags = bp->flags; in bnx2x_set_pf_tx_switching()
2414 bp->flags |= TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2416 bp->flags &= ~TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2417 if (prev_flags == bp->flags) in bnx2x_set_pf_tx_switching()
2421 if ((bp->state != BNX2X_STATE_OPEN) || in bnx2x_set_pf_tx_switching()
2422 (bnx2x_get_q_logical_state(bp, in bnx2x_set_pf_tx_switching()
2423 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != in bnx2x_set_pf_tx_switching()
2441 for_each_eth_queue(bp, i) { in bnx2x_set_pf_tx_switching()
2442 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_pf_tx_switching()
2445 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; in bnx2x_set_pf_tx_switching()
2448 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_pf_tx_switching()
2461 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); in bnx2x_sriov_configure() local
2463 if (!IS_SRIOV(bp)) { in bnx2x_sriov_configure()
2469 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2472 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_sriov_configure()
2478 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { in bnx2x_sriov_configure()
2480 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2481 num_vfs_param = BNX2X_NR_VIRTFN(bp); in bnx2x_sriov_configure()
2484 bp->requested_nr_virtfn = num_vfs_param; in bnx2x_sriov_configure()
2486 bnx2x_set_pf_tx_switching(bp, false); in bnx2x_sriov_configure()
2487 bnx2x_disable_sriov(bp); in bnx2x_sriov_configure()
2490 return bnx2x_enable_sriov(bp); in bnx2x_sriov_configure()
2496 int bnx2x_enable_sriov(struct bnx2x *bp) in bnx2x_enable_sriov() argument
2498 int rc = 0, req_vfs = bp->requested_nr_virtfn; in bnx2x_enable_sriov()
2506 first_vf = bp->vfdb->sriov.first_vf_in_pf; in bnx2x_enable_sriov()
2510 BP_VFDB(bp)->vf_sbs_pool / req_vfs); in bnx2x_enable_sriov()
2514 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2517 vf_sb_count(BP_VF(bp, vf_idx)) = 0; in bnx2x_enable_sriov()
2519 bp->vfdb->vf_sbs_pool = 0; in bnx2x_enable_sriov()
2522 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; in bnx2x_enable_sriov()
2531 REG_WR(bp, address, igu_entry); in bnx2x_enable_sriov()
2538 bnx2x_get_vf_igu_cam_info(bp); in bnx2x_enable_sriov()
2541 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); in bnx2x_enable_sriov()
2544 for_each_vf(bp, vf_idx) { in bnx2x_enable_sriov()
2545 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2548 vf->vfqs = &bp->vfdb->vfqs[qcount]; in bnx2x_enable_sriov()
2550 bnx2x_iov_static_resc(bp, vf); in bnx2x_enable_sriov()
2558 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); in bnx2x_enable_sriov()
2559 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, in bnx2x_enable_sriov()
2564 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_enable_sriov()
2570 bnx2x_disable_sriov(bp); in bnx2x_enable_sriov()
2572 rc = bnx2x_set_pf_tx_switching(bp, true); in bnx2x_enable_sriov()
2576 rc = pci_enable_sriov(bp->pdev, req_vfs); in bnx2x_enable_sriov()
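
bnx2x_enable_sriov() splits the status-block pool evenly (vf_sbs_pool / req_vfs, line 2510 above) and carves the shared vfqs[] array into consecutive per-VF slices (line 2548). A sketch of the carving; the running-offset update is an assumption, since only the assignment is visible in the listing:

    struct vf_queue { int dummy; /* ... */ };
    struct virtf_q  { struct vf_queue *vfqs; };

    static void carve_queues(struct vf_queue *pool, struct virtf_q *vfs,
                             int nr_vfs, int num_vf_queues)
    {
            int vf_idx, qcount = 0;

            for (vf_idx = 0; vf_idx < nr_vfs; vf_idx++) {
                    vfs[vf_idx].vfqs = &pool[qcount];  /* line 2548 */
                    qcount += num_vf_queues;           /* assumed */
            }
    }
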
2585 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) in bnx2x_pf_set_vfs_vlan() argument
2591 for_each_vf(bp, vfidx) { in bnx2x_pf_set_vfs_vlan()
2592 bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_pf_set_vfs_vlan()
2593 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) in bnx2x_pf_set_vfs_vlan()
2594 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); in bnx2x_pf_set_vfs_vlan()
2598 void bnx2x_disable_sriov(struct bnx2x *bp) in bnx2x_disable_sriov() argument
2600 if (pci_vfs_assigned(bp->pdev)) { in bnx2x_disable_sriov()
2606 pci_disable_sriov(bp->pdev); in bnx2x_disable_sriov()
2609 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, in bnx2x_vf_op_prep() argument
2614 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_vf_op_prep()
2619 if (!IS_SRIOV(bp)) { in bnx2x_vf_op_prep()
2624 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { in bnx2x_vf_op_prep()
2626 vfidx, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_op_prep()
2631 *vf = BP_VF(bp, vfidx); in bnx2x_vf_op_prep()
2632 *bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_vf_op_prep()
2657 struct bnx2x *bp = netdev_priv(dev); in bnx2x_get_vf_config() local
2665 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_get_vf_config()
2683 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_get_vf_config()
2684 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, in bnx2x_get_vf_config()
2686 vlan_obj->get_n_elements(bp, vlan_obj, 1, in bnx2x_get_vf_config()
2691 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2708 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2733 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_mac() local
2744 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_mac()
2748 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2757 rc = bnx2x_post_vf_bulletin(bp, vfidx); in bnx2x_set_vf_mac()
2760 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2768 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); in bnx2x_set_vf_mac()
2776 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_mac()
2780 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
2784 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); in bnx2x_set_vf_mac()
2792 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); in bnx2x_set_vf_mac()
2801 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, in bnx2x_set_vf_mac()
2805 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
2818 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_vlan() local
2835 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_vlan()
2845 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2853 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2857 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != in bnx2x_set_vf_vlan()
2862 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_vlan()
2866 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2871 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, in bnx2x_set_vf_vlan()
2886 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, in bnx2x_set_vf_vlan()
2889 bnx2x_config_rx_mode(bp, &rx_ramrod); in bnx2x_set_vf_vlan()
2900 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); in bnx2x_set_vf_vlan()
2942 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_vf_vlan()
2955 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2974 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) in bnx2x_sample_bulletin() argument
2986 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, in bnx2x_sample_bulletin()
2989 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); in bnx2x_sample_bulletin()
2991 if (bp->shadow_bulletin.content.crc == crc) in bnx2x_sample_bulletin()
2995 bp->shadow_bulletin.content.crc, crc); in bnx2x_sample_bulletin()
3003 bulletin = &bp->shadow_bulletin.content; in bnx2x_sample_bulletin()
3006 if (bp->old_bulletin.version == bulletin->version) in bnx2x_sample_bulletin()
3011 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { in bnx2x_sample_bulletin()
3013 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); in bnx2x_sample_bulletin()
3020 bp->vf_link_vars.line_speed = bulletin->link_speed; in bnx2x_sample_bulletin()
3021 bp->vf_link_vars.link_report_flags = 0; in bnx2x_sample_bulletin()
3025 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3029 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3033 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3037 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3038 __bnx2x_link_report(bp); in bnx2x_sample_bulletin()
3042 memcpy(&bp->old_bulletin, bulletin, in bnx2x_sample_bulletin()
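
bnx2x_sample_bulletin() copies the PF-written bulletin into a private shadow, recomputes the CRC over the payload, and only trusts the copy when the CRCs agree, retrying because the PF may be mid-update. A self-contained sketch; the struct layout is an example and the checksum is a placeholder for bnx2x_crc_vf_bulletin():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct bulletin {
            uint32_t crc;           /* checksum of everything after it */
            uint16_t version;
            uint8_t  mac[6];        /* example payload */
    };

    static uint32_t crc_of(const struct bulletin *b)
    {
            /* placeholder checksum; the driver uses a crc32 helper */
            const uint8_t *p = (const uint8_t *)b + sizeof(b->crc);
            size_t i, n = sizeof(*b) - sizeof(b->crc);
            uint32_t acc = 0;

            for (i = 0; i < n; i++)
                    acc = acc * 31 + p[i];
            return acc;
    }

    static bool sample_bulletin(const struct bulletin *shared,
                                struct bulletin *shadow, int attempts)
    {
            while (attempts--) {
                    memcpy(shadow, shared, sizeof(*shadow));
                    if (shadow->crc == crc_of(shadow))
                            return true;   /* consistent snapshot */
            }
            return false;                  /* PF kept rewriting it */
    }
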
3048 void bnx2x_timer_sriov(struct bnx2x *bp) in bnx2x_timer_sriov() argument
3050 bnx2x_sample_bulletin(bp); in bnx2x_timer_sriov()
3053 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) in bnx2x_timer_sriov()
3054 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, in bnx2x_timer_sriov()
3058 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) in bnx2x_vf_doorbells() argument
3061 return bp->regview + PXP_VF_ADDR_DB_START; in bnx2x_vf_doorbells()
3064 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) in bnx2x_vf_pci_dealloc() argument
3066 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_dealloc()
3068 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_dealloc()
3072 int bnx2x_vf_pci_alloc(struct bnx2x *bp) in bnx2x_vf_pci_alloc() argument
3074 mutex_init(&bp->vf2pf_mutex); in bnx2x_vf_pci_alloc()
3077 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_alloc()
3079 if (!bp->vf2pf_mbox) in bnx2x_vf_pci_alloc()
3083 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_alloc()
3085 if (!bp->pf2vf_bulletin) in bnx2x_vf_pci_alloc()
3088 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); in bnx2x_vf_pci_alloc()
3093 bnx2x_vf_pci_dealloc(bp); in bnx2x_vf_pci_alloc()
3097 void bnx2x_iov_channel_down(struct bnx2x *bp) in bnx2x_iov_channel_down() argument
3102 if (!IS_SRIOV(bp)) in bnx2x_iov_channel_down()
3105 for_each_vf(bp, vf_idx) { in bnx2x_iov_channel_down()
3109 bulletin = BP_VF_BULLETIN(bp, vf_idx); in bnx2x_iov_channel_down()
3113 bnx2x_post_vf_bulletin(bp, vf_idx); in bnx2x_iov_channel_down()
3119 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); in bnx2x_iov_task() local
3121 if (!netif_running(bp->dev)) in bnx2x_iov_task()
3125 &bp->iov_task_state)) in bnx2x_iov_task()
3126 bnx2x_vf_handle_flr_event(bp); in bnx2x_iov_task()
3129 &bp->iov_task_state)) in bnx2x_iov_task()
3130 bnx2x_vf_mbx(bp); in bnx2x_iov_task()
3133 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) in bnx2x_schedule_iov_task() argument
3136 set_bit(flag, &bp->iov_task_state); in bnx2x_schedule_iov_task()
3139 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); in bnx2x_schedule_iov_task()
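
bnx2x_schedule_iov_task() and bnx2x_iov_task() form a set-bit-then-queue dispatcher: event sources set a flag and queue the work item, and the worker atomically consumes each flag so a bit set during handling re-queues cleanly. A sketch using C11 atomics in place of the kernel's test_and_clear_bit(); the flag names are stand-ins:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { IOV_HANDLE_FLR, IOV_HANDLE_VF_MSG };     /* hypothetical bits */

    static _Atomic unsigned long iov_task_state;

    /* mimics test_and_clear_bit(): true if the bit was set */
    static bool test_and_clear(int bit)
    {
            unsigned long mask = 1ul << bit;

            return atomic_fetch_and(&iov_task_state, ~mask) & mask;
    }

    static void iov_task(void)
    {
            if (test_and_clear(IOV_HANDLE_FLR))
                    ;   /* bnx2x_vf_handle_flr_event() */
            if (test_and_clear(IOV_HANDLE_VF_MSG))
                    ;   /* bnx2x_vf_mbx() */
    }
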