Lines Matching refs:bp

26 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
29 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, in bnx2x_add_tlv() argument
40 static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, in bnx2x_vfpf_prep() argument
43 mutex_lock(&bp->vf2pf_mutex); in bnx2x_vfpf_prep()
49 memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); in bnx2x_vfpf_prep()
52 bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); in bnx2x_vfpf_prep()
55 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); in bnx2x_vfpf_prep()
59 static void bnx2x_vfpf_finalize(struct bnx2x *bp, in bnx2x_vfpf_finalize() argument
65 mutex_unlock(&bp->vf2pf_mutex); in bnx2x_vfpf_finalize()
69 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, in bnx2x_search_tlv_list() argument
93 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) in bnx2x_dp_tlv_list() argument
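
The matches above (bnx2x_add_tlv, bnx2x_vfpf_prep, bnx2x_search_tlv_list, bnx2x_dp_tlv_list) all operate on the same TLV-chain layout inside the VF2PF mailbox. Below is a minimal userspace sketch of that pattern, assuming a simplified struct channel_tlv and an illustrative CHANNEL_TLV_LIST_END value of 0; it mirrors the add/search behaviour implied by the listing but is not the driver's code.

    /* Sketch of the TLV-chain pattern behind bnx2x_add_tlv() and
     * bnx2x_search_tlv_list().  Types and the list-end value are
     * assumptions for illustration only. */
    #include <stdint.h>

    #define CHANNEL_TLV_LIST_END 0          /* assumed terminator value */

    struct channel_tlv {
            uint16_t type;
            uint16_t length;                /* length of this TLV, header included */
    };

    /* Stamp a TLV header at byte offset 'offset' inside the request buffer. */
    static void add_tlv(void *tlvs_list, uint16_t offset,
                        uint16_t type, uint16_t length)
    {
            struct channel_tlv *tl =
                    (struct channel_tlv *)((uint8_t *)tlvs_list + offset);

            tl->type = type;
            tl->length = length;
    }

    /* Walk the chain until the requested type or the list-end marker. */
    static void *search_tlv_list(void *tlvs_list, uint16_t req_type)
    {
            struct channel_tlv *tl = tlvs_list;

            while (tl->type != CHANNEL_TLV_LIST_END) {
                    if (tl->type == req_type)
                            return tl;
                    if (!tl->length)        /* malformed chain, bail out */
                            return NULL;
                    tl = (struct channel_tlv *)((uint8_t *)tl + tl->length);
            }
            return NULL;
    }
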
141 static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) in bnx2x_send_msg2pf() argument
144 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); in bnx2x_send_msg2pf()
156 bnx2x_sample_bulletin(bp); in bnx2x_send_msg2pf()
157 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { in bnx2x_send_msg2pf()
194 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) in bnx2x_get_vf_id() argument
201 me_reg = readl(bp->doorbells); in bnx2x_get_vf_id()
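
The bnx2x_send_msg2pf matches above show the VF handing the PF a DMA address and waiting on a status byte. The following self-contained sketch illustrates that request/poll shape; the register layout, the assumption that the low-dword write fires the request, and the "zero means still pending" convention are all illustrative, not the driver's definitions.

    /* Sketch of the request/poll pattern in bnx2x_send_msg2pf():  write the
     * mailbox DMA address to the PF trigger registers, then poll a 'done'
     * byte the PF writes back.  All names and conventions here are assumed. */
    #include <stdint.h>

    static int send_msg2pf_sketch(volatile uint32_t *trigger_lo,
                                  volatile uint32_t *trigger_hi,
                                  volatile uint8_t *done,
                                  uint64_t mbox_dma, int max_polls)
    {
            int i;

            *trigger_hi = (uint32_t)(mbox_dma >> 32);
            *trigger_lo = (uint32_t)mbox_dma;   /* assumed: low dword last fires it */

            for (i = 0; i < max_polls; i++) {
                    if (*done)                  /* PF has DMAed a response status */
                            return 0;
                    /* the driver sleeps between polls; omitted here */
            }
            return -1;                          /* timed out waiting for the PF */
    }
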
223 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) in bnx2x_vfpf_acquire() argument
226 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; in bnx2x_vfpf_acquire()
227 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; in bnx2x_vfpf_acquire()
234 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); in bnx2x_vfpf_acquire()
236 if (bnx2x_get_vf_id(bp, &vf_id)) { in bnx2x_vfpf_acquire()
247 req->resc_request.num_sbs = bp->igu_sb_cnt; in bnx2x_vfpf_acquire()
253 req->bulletin_addr = bp->pf2vf_bulletin_mapping; in bnx2x_vfpf_acquire()
256 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, in bnx2x_vfpf_acquire()
265 bnx2x_add_tlv(bp, req, in bnx2x_vfpf_acquire()
271 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_acquire()
277 rc = bnx2x_send_msg2pf(bp, in bnx2x_vfpf_acquire()
279 bp->vf2pf_mbox_mapping); in bnx2x_vfpf_acquire()
286 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); in bnx2x_vfpf_acquire()
293 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { in bnx2x_vfpf_acquire()
296 } else if (bp->acquire_resp.hdr.status == in bnx2x_vfpf_acquire()
305 bp->acquire_resp.resc.num_txqs); in bnx2x_vfpf_acquire()
308 bp->acquire_resp.resc.num_rxqs); in bnx2x_vfpf_acquire()
311 bp->acquire_resp.resc.num_sbs); in bnx2x_vfpf_acquire()
314 bp->acquire_resp.resc.num_mac_filters); in bnx2x_vfpf_acquire()
317 bp->acquire_resp.resc.num_vlan_filters); in bnx2x_vfpf_acquire()
320 bp->acquire_resp.resc.num_mc_filters); in bnx2x_vfpf_acquire()
323 memset(&bp->vf2pf_mbox->resp, 0, in bnx2x_vfpf_acquire()
327 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, in bnx2x_vfpf_acquire()
333 bp->acquire_resp.hdr.status); in bnx2x_vfpf_acquire()
341 bnx2x_search_tlv_list(bp, resp, in bnx2x_vfpf_acquire()
344 memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); in bnx2x_vfpf_acquire()
345 bp->flags |= HAS_PHYS_PORT_ID; in bnx2x_vfpf_acquire()
352 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, in bnx2x_vfpf_acquire()
360 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_acquire()
361 bnx2x_vfpf_release(bp); in bnx2x_vfpf_acquire()
368 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); in bnx2x_vfpf_acquire()
369 bp->link_params.chip_id = bp->common.chip_id; in bnx2x_vfpf_acquire()
370 bp->db_size = bp->acquire_resp.pfdev_info.db_size; in bnx2x_vfpf_acquire()
371 bp->common.int_block = INT_BLOCK_IGU; in bnx2x_vfpf_acquire()
372 bp->common.chip_port_mode = CHIP_2_PORT_MODE; in bnx2x_vfpf_acquire()
373 bp->igu_dsb_id = -1; in bnx2x_vfpf_acquire()
374 bp->mf_ov = 0; in bnx2x_vfpf_acquire()
375 bp->mf_mode = 0; in bnx2x_vfpf_acquire()
376 bp->common.flash_size = 0; in bnx2x_vfpf_acquire()
377 bp->flags |= in bnx2x_vfpf_acquire()
379 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; in bnx2x_vfpf_acquire()
380 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; in bnx2x_vfpf_acquire()
381 bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters; in bnx2x_vfpf_acquire()
383 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, in bnx2x_vfpf_acquire()
384 sizeof(bp->fw_ver)); in bnx2x_vfpf_acquire()
386 if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) in bnx2x_vfpf_acquire()
387 memcpy(bp->dev->dev_addr, in bnx2x_vfpf_acquire()
388 bp->acquire_resp.resc.current_mac_addr, in bnx2x_vfpf_acquire()
392 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_acquire()
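
The acquire matches above include a retry path when the PF reports it cannot grant the requested resources. Here is a toy, self-contained sketch of that negotiation under assumed status codes and a stub PF; the real bnx2x_vfpf_acquire() uses its own status values and resource structures.

    /* Sketch of the acquire resource negotiation: if the PF answers
     * NO_RESOURCES, the VF trims ("humbles") its request to what the PF
     * reported and retries.  Status codes, struct resc and the stub PF
     * below are illustrative assumptions. */
    enum { STATUS_SUCCESS = 1, STATUS_NO_RESOURCES = 2 };

    struct resc { int num_rxqs, num_txqs, num_sbs; };

    /* stand-in for one VF->PF mailbox round trip; a toy PF owning 2 queues */
    static int pf_acquire(const struct resc *req, struct resc *granted)
    {
            const struct resc avail = { 2, 2, 2 };

            *granted = avail;
            if (req->num_rxqs > avail.num_rxqs || req->num_txqs > avail.num_txqs)
                    return STATUS_NO_RESOURCES;
            return STATUS_SUCCESS;
    }

    static int vf_acquire_sketch(struct resc *want)
    {
            struct resc granted;
            int attempt;

            for (attempt = 0; attempt < 3; attempt++) {
                    int status = pf_acquire(want, &granted);

                    if (status == STATUS_SUCCESS)
                            return 0;
                    if (status != STATUS_NO_RESOURCES)
                            return -1;
                    *want = granted;    /* humble the request and retry */
            }
            return -1;
    }
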
396 int bnx2x_vfpf_release(struct bnx2x *bp) in bnx2x_vfpf_release() argument
398 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; in bnx2x_vfpf_release()
399 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_release()
403 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); in bnx2x_vfpf_release()
405 if (bnx2x_get_vf_id(bp, &vf_id)) { in bnx2x_vfpf_release()
413 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_release()
417 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_release()
420 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_release()
437 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_release()
443 int bnx2x_vfpf_init(struct bnx2x *bp) in bnx2x_vfpf_init() argument
445 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; in bnx2x_vfpf_init()
446 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_init()
450 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); in bnx2x_vfpf_init()
453 for_each_eth_queue(bp, i) in bnx2x_vfpf_init()
454 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, in bnx2x_vfpf_init()
458 req->stats_addr = bp->fw_stats_data_mapping + in bnx2x_vfpf_init()
464 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_init()
468 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_init()
470 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_init()
483 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_init()
489 void bnx2x_vfpf_close_vf(struct bnx2x *bp) in bnx2x_vfpf_close_vf() argument
491 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; in bnx2x_vfpf_close_vf()
492 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_close_vf()
499 if (bnx2x_get_vf_id(bp, &vf_id)) in bnx2x_vfpf_close_vf()
503 for_each_queue(bp, i) in bnx2x_vfpf_close_vf()
504 bnx2x_vfpf_teardown_queue(bp, i); in bnx2x_vfpf_close_vf()
507 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false); in bnx2x_vfpf_close_vf()
510 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); in bnx2x_vfpf_close_vf()
515 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_close_vf()
519 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_close_vf()
521 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_close_vf()
530 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_close_vf()
534 bnx2x_netif_stop(bp, 0); in bnx2x_vfpf_close_vf()
536 bnx2x_del_all_napi(bp); in bnx2x_vfpf_close_vf()
539 bnx2x_free_irq(bp); in bnx2x_vfpf_close_vf()
542 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_leading_vfq_init() argument
549 bnx2x_init_mac_obj(bp, &q->mac_obj, in bnx2x_leading_vfq_init()
551 bnx2x_vf_sp(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
552 bnx2x_vf_sp_map(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
558 bnx2x_init_vlan_obj(bp, &q->vlan_obj, in bnx2x_leading_vfq_init()
560 bnx2x_vf_sp(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
561 bnx2x_vf_sp_map(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
567 bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj, in bnx2x_leading_vfq_init()
569 bnx2x_vf_sp(bp, vf, vlan_mac_rdata), in bnx2x_leading_vfq_init()
570 bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata), in bnx2x_leading_vfq_init()
577 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, in bnx2x_leading_vfq_init()
579 bnx2x_vf_sp(bp, vf, mcast_rdata), in bnx2x_leading_vfq_init()
580 bnx2x_vf_sp_map(bp, vf, mcast_rdata), in bnx2x_leading_vfq_init()
586 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, in bnx2x_leading_vfq_init()
588 bnx2x_vf_sp(bp, vf, rss_rdata), in bnx2x_leading_vfq_init()
589 bnx2x_vf_sp_map(bp, vf, rss_rdata), in bnx2x_leading_vfq_init()
600 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_vfpf_setup_q() argument
603 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; in bnx2x_vfpf_setup_q()
604 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_setup_q()
610 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); in bnx2x_vfpf_setup_q()
640 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; in bnx2x_vfpf_setup_q()
641 req->rxq.mtu = bp->dev->mtu; in bnx2x_vfpf_setup_q()
645 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; in bnx2x_vfpf_setup_q()
657 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; in bnx2x_vfpf_setup_q()
662 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_setup_q()
666 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_setup_q()
668 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_setup_q()
679 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_setup_q()
684 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) in bnx2x_vfpf_teardown_queue() argument
686 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; in bnx2x_vfpf_teardown_queue()
687 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_teardown_queue()
691 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, in bnx2x_vfpf_teardown_queue()
697 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_teardown_queue()
701 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_teardown_queue()
703 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_teardown_queue()
719 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_teardown_queue()
725 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) in bnx2x_vfpf_config_mac() argument
727 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; in bnx2x_vfpf_config_mac()
728 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_config_mac()
729 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; in bnx2x_vfpf_config_mac()
733 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, in bnx2x_vfpf_config_mac()
745 bnx2x_sample_bulletin(bp); in bnx2x_vfpf_config_mac()
751 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_config_mac()
755 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_config_mac()
758 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_config_mac()
770 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); in bnx2x_vfpf_config_mac()
773 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { in bnx2x_vfpf_config_mac()
775 memcpy(req->filters[0].mac, bp->dev->dev_addr, in bnx2x_vfpf_config_mac()
779 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, in bnx2x_vfpf_config_mac()
780 bp->vf2pf_mbox_mapping); in bnx2x_vfpf_config_mac()
792 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_config_mac()
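
The config_mac matches above sample the bulletin board and retry the filter request with the PF's MAC when the VF's own address is refused. The sketch below captures that retry shape with stand-in helpers and a toy PF; ETH_ALEN aside, every name and the stub behaviour are assumptions, not the driver's API.

    /* Sketch of the bulletin-override handling around bnx2x_vfpf_config_mac():
     * if the PF rejects the VF's MAC because it has forced one through the
     * bulletin board, the VF adopts the PF's MAC and retries once. */
    #include <string.h>
    #include <stdbool.h>

    #define ETH_ALEN 6

    struct bulletin { bool mac_valid; unsigned char mac[ETH_ALEN]; };

    /* stand-ins for the mailbox round trip and the bulletin sampling */
    static int pf_rejects_first_mac = 1;
    static unsigned char pf_forced_mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

    static int send_mac_filter(const unsigned char *mac)
    {
            if (pf_rejects_first_mac && memcmp(mac, pf_forced_mac, ETH_ALEN)) {
                    pf_rejects_first_mac = 0;
                    return -1;          /* PF refused: a forced MAC is in effect */
            }
            return 0;
    }

    static bool sample_bulletin(struct bulletin *b)
    {
            b->mac_valid = true;
            memcpy(b->mac, pf_forced_mac, ETH_ALEN);
            return true;
    }

    static int config_mac_sketch(unsigned char *dev_addr)
    {
            struct bulletin b;

            if (send_mac_filter(dev_addr) == 0)
                    return 0;

            /* PF may have overridden our MAC via the bulletin board */
            if (sample_bulletin(&b) && b.mac_valid) {
                    memcpy(dev_addr, b.mac, ETH_ALEN);
                    return send_mac_filter(dev_addr);   /* retry with PF's MAC */
            }
            return -1;
    }
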
798 int bnx2x_vfpf_config_rss(struct bnx2x *bp, in bnx2x_vfpf_config_rss() argument
801 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_config_rss()
802 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; in bnx2x_vfpf_config_rss()
806 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, in bnx2x_vfpf_config_rss()
810 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_config_rss()
842 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_config_rss()
845 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_config_rss()
861 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_config_rss()
868 struct bnx2x *bp = netdev_priv(dev); in bnx2x_vfpf_set_mcast() local
869 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; in bnx2x_vfpf_set_mcast()
870 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_set_mcast()
874 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_vfpf_set_mcast()
875 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); in bnx2x_vfpf_set_mcast()
880 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, in bnx2x_vfpf_set_mcast()
908 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_set_mcast()
912 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_set_mcast()
913 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_set_mcast()
925 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_set_mcast()
931 int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) in bnx2x_vfpf_update_vlan() argument
933 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; in bnx2x_vfpf_update_vlan()
934 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_update_vlan()
937 if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) { in bnx2x_vfpf_update_vlan()
943 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, in bnx2x_vfpf_update_vlan()
956 bnx2x_sample_bulletin(bp); in bnx2x_vfpf_update_vlan()
958 if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { in bnx2x_vfpf_update_vlan()
967 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_update_vlan()
971 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_update_vlan()
974 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_update_vlan()
986 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_update_vlan()
991 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) in bnx2x_vfpf_storm_rx_mode() argument
993 int mode = bp->rx_mode; in bnx2x_vfpf_storm_rx_mode()
994 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; in bnx2x_vfpf_storm_rx_mode()
995 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; in bnx2x_vfpf_storm_rx_mode()
999 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, in bnx2x_vfpf_storm_rx_mode()
1018 if (bp->accept_any_vlan) in bnx2x_vfpf_storm_rx_mode()
1025 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, in bnx2x_vfpf_storm_rx_mode()
1029 bnx2x_dp_tlv_list(bp, req); in bnx2x_vfpf_storm_rx_mode()
1031 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); in bnx2x_vfpf_storm_rx_mode()
1040 bnx2x_vfpf_finalize(bp, &req->first_tlv); in bnx2x_vfpf_storm_rx_mode()
1046 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) in storm_memset_vf_mbx_ack() argument
1051 REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); in storm_memset_vf_mbx_ack()
1054 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) in storm_memset_vf_mbx_valid() argument
1059 REG_WR8(bp, addr, 1); in storm_memset_vf_mbx_valid()
1063 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_enable_mbx() argument
1065 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); in bnx2x_vf_enable_mbx()
1068 storm_memset_vf_mbx_ack(bp, abs_vfid); in bnx2x_vf_enable_mbx()
1069 storm_memset_vf_mbx_valid(bp, abs_vfid); in bnx2x_vf_enable_mbx()
1072 bnx2x_vf_enable_access(bp, abs_vfid); in bnx2x_vf_enable_mbx()
1076 static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, in bnx2x_copy32_vf_dmae() argument
1082 if (CHIP_IS_E1x(bp)) { in bnx2x_copy32_vf_dmae()
1087 if (!bp->dmae_ready) { in bnx2x_copy32_vf_dmae()
1093 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); in bnx2x_copy32_vf_dmae()
1121 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); in bnx2x_copy32_vf_dmae()
1124 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, in bnx2x_vf_mbx_resp_single_tlv() argument
1127 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); in bnx2x_vf_mbx_resp_single_tlv()
1135 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); in bnx2x_vf_mbx_resp_single_tlv()
1136 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, in bnx2x_vf_mbx_resp_single_tlv()
1140 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, in bnx2x_vf_mbx_resp_send_msg() argument
1144 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); in bnx2x_vf_mbx_resp_send_msg()
1150 bnx2x_dp_tlv_list(bp, resp); in bnx2x_vf_mbx_resp_send_msg()
1167 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, in bnx2x_vf_mbx_resp_send_msg()
1180 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); in bnx2x_vf_mbx_resp_send_msg()
1186 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, in bnx2x_vf_mbx_resp_send_msg()
1192 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); in bnx2x_vf_mbx_resp_send_msg()
1202 bnx2x_vf_release(bp, vf); in bnx2x_vf_mbx_resp_send_msg()
1205 static void bnx2x_vf_mbx_resp(struct bnx2x *bp, in bnx2x_vf_mbx_resp() argument
1209 bnx2x_vf_mbx_resp_single_tlv(bp, vf); in bnx2x_vf_mbx_resp()
1210 bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); in bnx2x_vf_mbx_resp()
1213 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, in bnx2x_vf_mbx_resp_phys_port() argument
1220 if (!(bp->flags & HAS_PHYS_PORT_ID)) in bnx2x_vf_mbx_resp_phys_port()
1223 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, in bnx2x_vf_mbx_resp_phys_port()
1228 memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); in bnx2x_vf_mbx_resp_phys_port()
1236 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, in bnx2x_vf_mbx_resp_fp_hsi_ver() argument
1243 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, in bnx2x_vf_mbx_resp_fp_hsi_ver()
1256 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_acquire_resp() argument
1268 resp->pfdev_info.chip_num = bp->common.chip_id; in bnx2x_vf_mbx_acquire_resp()
1269 resp->pfdev_info.db_size = bp->db_size; in bnx2x_vf_mbx_acquire_resp()
1275 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, in bnx2x_vf_mbx_acquire_resp()
1284 bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_mbx_acquire_resp()
1286 bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_mbx_acquire_resp()
1295 BP_VF_BULLETIN(bp, vf->index); in bnx2x_vf_mbx_acquire_resp()
1341 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); in bnx2x_vf_mbx_acquire_resp()
1347 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, in bnx2x_vf_mbx_acquire_resp()
1349 bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); in bnx2x_vf_mbx_acquire_resp()
1355 bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length); in bnx2x_vf_mbx_acquire_resp()
1357 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, in bnx2x_vf_mbx_acquire_resp()
1361 bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); in bnx2x_vf_mbx_acquire_resp()
1364 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp, in bnx2x_vf_mbx_is_windows_vm() argument
1381 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp, in bnx2x_vf_mbx_acquire_chk_dorq() argument
1388 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, in bnx2x_vf_mbx_acquire_chk_dorq()
1393 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) in bnx2x_vf_mbx_acquire_chk_dorq()
1399 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_acquire() argument
1418 rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx); in bnx2x_vf_mbx_acquire()
1429 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) in bnx2x_vf_mbx_acquire()
1444 rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); in bnx2x_vf_mbx_acquire()
1466 bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); in bnx2x_vf_mbx_acquire()
1469 static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_init_vf() argument
1478 rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); in bnx2x_vf_mbx_init_vf()
1486 bnx2x_iov_link_update_vf(bp, vf->index); in bnx2x_vf_mbx_init_vf()
1489 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_init_vf()
1493 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, in bnx2x_vf_mbx_set_q_flags() argument
1516 if (IS_MF_SD(bp)) in bnx2x_vf_mbx_set_q_flags()
1520 static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_setup_q() argument
1546 bnx2x_leading_vfq_init(bp, vf, q); in bnx2x_vf_mbx_setup_q()
1570 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, in bnx2x_vf_mbx_setup_q()
1574 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, in bnx2x_vf_mbx_setup_q()
1584 bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, in bnx2x_vf_mbx_setup_q()
1602 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, in bnx2x_vf_mbx_setup_q()
1606 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, in bnx2x_vf_mbx_setup_q()
1634 bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, in bnx2x_vf_mbx_setup_q()
1638 bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); in bnx2x_vf_mbx_setup_q()
1640 rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); in bnx2x_vf_mbx_setup_q()
1645 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_setup_q()
1648 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, in bnx2x_vf_mbx_macvlan_list() argument
1704 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, in bnx2x_vf_mbx_dp_q_filter() argument
1715 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, in bnx2x_vf_mbx_dp_q_filters() argument
1722 bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, in bnx2x_vf_mbx_dp_q_filters()
1737 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_mbx_qfilters() argument
1742 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; in bnx2x_vf_mbx_qfilters()
1749 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, in bnx2x_vf_mbx_qfilters()
1757 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, in bnx2x_vf_mbx_qfilters()
1767 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, in bnx2x_vf_mbx_qfilters()
1774 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, in bnx2x_vf_mbx_qfilters()
1786 BP_VF_BULLETIN(bp, vf->index); in bnx2x_vf_mbx_qfilters()
1807 rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); in bnx2x_vf_mbx_qfilters()
1814 rc = bnx2x_vf_mcast(bp, vf, msg->multicast, in bnx2x_vf_mbx_qfilters()
1826 static int bnx2x_filters_validate_mac(struct bnx2x *bp, in bnx2x_filters_validate_mac() argument
1830 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); in bnx2x_filters_validate_mac()
1876 static int bnx2x_filters_validate_vlan(struct bnx2x *bp, in bnx2x_filters_validate_vlan() argument
1880 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); in bnx2x_filters_validate_vlan()
1906 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, in bnx2x_vf_mbx_set_q_filters() argument
1913 rc = bnx2x_filters_validate_mac(bp, vf, filters); in bnx2x_vf_mbx_set_q_filters()
1917 rc = bnx2x_filters_validate_vlan(bp, vf, filters); in bnx2x_vf_mbx_set_q_filters()
1926 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); in bnx2x_vf_mbx_set_q_filters()
1928 rc = bnx2x_vf_mbx_qfilters(bp, vf); in bnx2x_vf_mbx_set_q_filters()
1930 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_set_q_filters()
1933 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_teardown_q() argument
1942 rc = bnx2x_vf_queue_teardown(bp, vf, qid); in bnx2x_vf_mbx_teardown_q()
1943 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_teardown_q()
1946 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_close_vf() argument
1953 rc = bnx2x_vf_close(bp, vf); in bnx2x_vf_mbx_close_vf()
1954 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_close_vf()
1957 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_release_vf() argument
1964 rc = bnx2x_vf_free(bp, vf); in bnx2x_vf_mbx_release_vf()
1965 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_release_vf()
1968 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_update_rss() argument
2024 rc = bnx2x_vf_rss_update(bp, vf, &rss); in bnx2x_vf_mbx_update_rss()
2026 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_update_rss()
2029 static int bnx2x_validate_tpa_params(struct bnx2x *bp, in bnx2x_validate_tpa_params() argument
2042 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { in bnx2x_validate_tpa_params()
2046 MAX_AGG_QS(bp)); in bnx2x_validate_tpa_params()
2052 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_update_tpa() argument
2061 if (bnx2x_validate_tpa_params(bp, tpa_tlv)) in bnx2x_vf_mbx_update_tpa()
2087 rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); in bnx2x_vf_mbx_update_tpa()
2090 bnx2x_vf_mbx_resp(bp, vf, rc); in bnx2x_vf_mbx_update_tpa()
2094 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mbx_request() argument
2104 bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); in bnx2x_vf_mbx_request()
2109 bnx2x_vf_mbx_acquire(bp, vf, mbx); in bnx2x_vf_mbx_request()
2112 bnx2x_vf_mbx_init_vf(bp, vf, mbx); in bnx2x_vf_mbx_request()
2115 bnx2x_vf_mbx_setup_q(bp, vf, mbx); in bnx2x_vf_mbx_request()
2118 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); in bnx2x_vf_mbx_request()
2121 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); in bnx2x_vf_mbx_request()
2124 bnx2x_vf_mbx_close_vf(bp, vf, mbx); in bnx2x_vf_mbx_request()
2127 bnx2x_vf_mbx_release_vf(bp, vf, mbx); in bnx2x_vf_mbx_request()
2130 bnx2x_vf_mbx_update_rss(bp, vf, mbx); in bnx2x_vf_mbx_request()
2133 bnx2x_vf_mbx_update_tpa(bp, vf, mbx); in bnx2x_vf_mbx_request()
2155 bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); in bnx2x_vf_mbx_request()
2161 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); in bnx2x_vf_mbx_request()
2164 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); in bnx2x_vf_mbx_request()
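
The bnx2x_vf_mbx_request matches above show the PF-side dispatch: lock the VF-PF channel, switch on the first TLV's type, answer unknown types with NOT_SUPPORTED, ack the mailbox, release the channel. The sketch below is one possible framing of that flow with simplified stand-ins (pthread mutex for the channel lock, empty handlers); it compresses the real lock/ack ordering into a single function for illustration.

    /* Simplified sketch of the per-request dispatch in bnx2x_vf_mbx_request(). */
    #include <pthread.h>

    enum tlv_type { TLV_ACQUIRE = 1, TLV_INIT, TLV_SETUP_Q, TLV_LIST_END };

    struct vf_chan {
            pthread_mutex_t lock;       /* stand-in for the VF-PF channel lock */
    };

    static void handle_acquire(struct vf_chan *c)     { (void)c; }
    static void handle_init(struct vf_chan *c)        { (void)c; }
    static void resp_not_supported(struct vf_chan *c) { (void)c; }
    static void ack_mailbox(struct vf_chan *c)        { (void)c; }

    static void mbx_request_sketch(struct vf_chan *c, enum tlv_type type)
    {
            pthread_mutex_lock(&c->lock);

            switch (type) {
            case TLV_ACQUIRE: handle_acquire(c); break;
            case TLV_INIT:    handle_init(c);    break;
            default:
                    /* unknown request: still answer so the VF never blocks */
                    resp_not_supported(c);
                    break;
            }

            ack_mailbox(c);             /* lets the VF post its next request */
            pthread_mutex_unlock(&c->lock);
    }
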
2168 void bnx2x_vf_mbx_schedule(struct bnx2x *bp, in bnx2x_vf_mbx_schedule() argument
2179 if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > in bnx2x_vf_mbx_schedule()
2180 BNX2X_NR_VIRTFN(bp)) { in bnx2x_vf_mbx_schedule()
2182 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_mbx_schedule()
2186 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); in bnx2x_vf_mbx_schedule()
2189 mutex_lock(&BP_VFDB(bp)->event_mutex); in bnx2x_vf_mbx_schedule()
2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; in bnx2x_vf_mbx_schedule()
2191 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; in bnx2x_vf_mbx_schedule()
2192 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); in bnx2x_vf_mbx_schedule()
2193 mutex_unlock(&BP_VFDB(bp)->event_mutex); in bnx2x_vf_mbx_schedule()
2195 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); in bnx2x_vf_mbx_schedule()
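
bnx2x_vf_mbx_schedule records which VF raised a request under event_mutex and sets a bit in event_occur before scheduling the IOV task; bnx2x_vf_mbx later walks those bits. The following sketch shows that producer/consumer hand-off with a pthread mutex standing in for the kernel mutex; field names mirror the listing but the code is illustrative.

    /* Sketch of the event hand-off between bnx2x_vf_mbx_schedule() and the
     * IOV task in bnx2x_vf_mbx().  Locking and types are simplified. */
    #include <pthread.h>
    #include <stdint.h>

    struct vfdb_sketch {
            pthread_mutex_t event_mutex;
            uint64_t        event_occur;    /* one bit per VF with a pending message */
    };

    static void mbx_schedule_sketch(struct vfdb_sketch *db, int vf_idx)
    {
            pthread_mutex_lock(&db->event_mutex);
            db->event_occur |= (1ULL << vf_idx);
            pthread_mutex_unlock(&db->event_mutex);
            /* the driver then schedules the IOV task to service the mailbox */
    }

    static void mbx_task_sketch(struct vfdb_sketch *db, int nr_vfs)
    {
            uint64_t pending;
            int vf_idx;

            pthread_mutex_lock(&db->event_mutex);
            pending = db->event_occur;
            db->event_occur = 0;            /* claim everything we will service */
            pthread_mutex_unlock(&db->event_mutex);

            for (vf_idx = 0; vf_idx < nr_vfs; vf_idx++)
                    if (pending & (1ULL << vf_idx))
                            ;               /* copy the VF's request and dispatch it */
    }
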
2199 void bnx2x_vf_mbx(struct bnx2x *bp) in bnx2x_vf_mbx() argument
2201 struct bnx2x_vfdb *vfdb = BP_VFDB(bp); in bnx2x_vf_mbx()
2214 for_each_vf(bp, vf_idx) { in bnx2x_vf_mbx()
2215 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); in bnx2x_vf_mbx()
2216 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_vf_mbx()
2228 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, in bnx2x_vf_mbx()
2235 bnx2x_vf_release(bp, vf); in bnx2x_vf_mbx()
2248 bnx2x_vf_mbx_request(bp, vf, mbx); in bnx2x_vf_mbx()
2264 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) in bnx2x_post_vf_bulletin() argument
2266 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); in bnx2x_post_vf_bulletin()
2267 dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + in bnx2x_post_vf_bulletin()
2269 dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); in bnx2x_post_vf_bulletin()
2273 if (bnx2x_vf(bp, vf, state) != VF_ENABLED && in bnx2x_post_vf_bulletin()
2274 bnx2x_vf(bp, vf, state) != VF_ACQUIRED) in bnx2x_post_vf_bulletin()
2280 (bnx2x_vf(bp, vf, cfg_flags) & in bnx2x_post_vf_bulletin()
2284 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, in bnx2x_post_vf_bulletin()
2285 bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), in bnx2x_post_vf_bulletin()
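
The last group, together with the bnx2x_sample_bulletin calls earlier in the listing, is the bulletin-board side channel: the PF DMAs a snapshot into the VF's buffer and the VF validates it before acting on it. The sketch below shows a plausible consumer-side check using a version field and a toy checksum; the field layout and checksum are assumptions, not the driver's pf_vf_bulletin_content format.

    /* Sketch of bulletin sampling: copy the shared snapshot, reject torn or
     * stale copies, accept only a newer consistent version. */
    #include <stdint.h>
    #include <string.h>

    struct bulletin_sketch {
            uint32_t crc;           /* checksum over everything after this field */
            uint16_t version;       /* bumped by the PF on every post */
            uint16_t valid_bitmap;  /* which fields below carry data */
            uint8_t  mac[6];
    };

    static uint32_t crc_sketch(const void *p, size_t len)
    {
            const uint8_t *b = p;
            uint32_t crc = 0;

            while (len--)
                    crc = (crc << 1) ^ *b++;    /* toy checksum, not the real CRC */
            return crc;
    }

    /* Returns 1 if a new, consistent snapshot was copied into 'local'. */
    static int sample_bulletin_sketch(const volatile struct bulletin_sketch *shared,
                                      struct bulletin_sketch *local)
    {
            struct bulletin_sketch snap;

            memcpy(&snap, (const void *)shared, sizeof(snap));
            if (snap.crc != crc_sketch((uint8_t *)&snap + sizeof(snap.crc),
                                       sizeof(snap) - sizeof(snap.crc)))
                    return 0;                   /* torn or in-flight update */
            if (snap.version == local->version)
                    return 0;                   /* nothing new since last sample */
            *local = snap;
            return 1;
    }
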