From patchwork Mon Jan 4 08:07:16 2016
X-Patchwork-Submitter: Wenzhuo Lu
X-Patchwork-Id: 9716
X-Patchwork-Delegate: bruce.richardson@intel.com
From: Wenzhuo Lu
To: dev@dpdk.org
Date: Mon, 4 Jan 2016 16:07:16 +0800
Message-Id: <1451894836-27569-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH] ixgbe: support multicast promiscuous mode on VF

Add multicast promiscuous mode support to the ixgbe VF driver.

Please note that both the PF and VF drivers must support this mode before it
can be used, because the VF feature is configured on the PF. When a kernel PF
driver is used with a DPDK VF driver, make sure the kernel PF driver supports
VF multicast promiscuous mode. When a DPDK PF driver is used with a DPDK VF
driver, make sure the PF and VF drivers are the same version.
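For reference, a minimal usage sketch (not part of this patch) of how an
application reaches this code path through the generic ethdev API. The port_id
value and the helper name are illustrative only; rte_eth_allmulticast_enable()
and rte_eth_allmulticast_disable() are the existing ethdev calls that map to
the new VF ops.

#include <rte_ethdev.h>

/* Toggle VF multicast promiscuous mode on an already initialized ixgbe VF
 * port. The enable path ends up in ixgbevf_dev_allmulticast_enable(), which
 * sends IXGBE_VF_UPDATE_XCAST_MODE to the PF over the mailbox. */
static void
toggle_vf_mc_promisc(uint8_t port_id, int on)
{
	if (on)
		rte_eth_allmulticast_enable(port_id);
	else
		rte_eth_allmulticast_disable(port_id);
}

From testpmd, the equivalent is the "set allmulti" command.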
Signed-off-by: Wenzhuo Lu
Acked-by: Shaopeng He
---
 drivers/net/ixgbe/base/ixgbe_mbx.h |  4 +++
 drivers/net/ixgbe/ixgbe_ethdev.c   | 66 ++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_pf.c       | 65 +++++++++++++++++++++++++++++++++++++
 3 files changed, 135 insertions(+)

diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 445df10..4a120a3 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -89,6 +89,7 @@ enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
 	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
 	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	ixgbe_mbox_api_12,	/* API version 1.2, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -107,6 +108,9 @@ enum ixgbe_pfvf_api_rev {
 /* mailbox API, version 1.1 VF requests */
 #define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
 
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_UPDATE_XCAST_MODE	0x0C
+
 /* GET_QUEUES return data indices within the mailbox */
 #define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
 #define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4c4c6df..3308a05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -138,6 +138,12 @@
 
 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
 
+enum ixgbevf_xcast_modes {
+	IXGBEVF_XCAST_MODE_NONE = 0,
+	IXGBEVF_XCAST_MODE_MULTI,
+	IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int ixgbe_dev_configure(struct rte_eth_dev *dev);
@@ -237,6 +243,8 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 				 uint8_t queue, uint8_t msix_vector);
 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -511,6 +519,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.stats_reset = ixgbevf_dev_stats_reset,
 	.xstats_reset = ixgbevf_dev_stats_reset,
 	.dev_close = ixgbevf_dev_close,
+	.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
+	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
 	.dev_infos_get = ixgbevf_dev_info_get,
 	.mtu_set = ixgbevf_dev_set_mtu,
 	.vlan_filter_set = ixgbevf_vlan_filter_set,
@@ -1224,6 +1234,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 
 	/* start with highest supported, proceed down */
 	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+		ixgbe_mbox_api_12,
 		ixgbe_mbox_api_11,
 		ixgbe_mbox_api_10,
 	};
@@ -6191,6 +6202,61 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/* ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ */
+static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+				     int xcast_mode)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_12:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+	msgbuf[1] = xcast_mode;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+	if (err)
+		return err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	return 0;
+}
+
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
+
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+}
+
 static struct rte_driver rte_ixgbe_driver = {
 	.type = PMD_PDEV,
 	.init = rte_ixgbe_pmd_init,
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 2ffbd1f..3439f87 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -410,6 +410,40 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 }
 
 static int
+ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vmolr;
+
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+	RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf);
+
+	vmolr |= IXGBE_VMOLR_MPE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+	return 0;
+}
+
+static int
+ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vmolr;
+
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);
+
+	vmolr &= ~IXGBE_VMOLR_MPE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+	return 0;
+}
+
+static int
 ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -423,6 +457,9 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 
 	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
 
+	/* Disable multicast promiscuous at reset */
+	ixgbe_disable_vf_mc_promisc(dev, vf);
+
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
 	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
@@ -469,6 +506,9 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32
 	uint32_t reg_val;
 	int i;
 
+	/* Disable multicast promiscuous first */
+	ixgbe_disable_vf_mc_promisc(dev, vf);
+
 	/* only so many hash values supported */
 	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
 
@@ -546,6 +586,7 @@
 	switch (api_version) {
 	case ixgbe_mbox_api_10:
 	case ixgbe_mbox_api_11:
+	case ixgbe_mbox_api_12:
 		vfinfo[vf].api_version = (uint8_t)api_version;
 		return 0;
 	default:
@@ -569,6 +610,7 @@
 	switch (vfinfo[vf].api_version) {
 	case ixgbe_mbox_api_20:
 	case ixgbe_mbox_api_11:
+	case ixgbe_mbox_api_12:
 		break;
 	default:
 		return -1;
@@ -590,6 +632,26 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 }
 
 static int
+ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	bool enable = !!msgbuf[1];	/* msgbuf contains the flag to enable */
+
+	switch (vfinfo[vf].api_version) {
+	case ixgbe_mbox_api_12:
+		break;
+	default:
+		return -1;
+	}
+
+	if (enable)
+		return ixgbe_enable_vf_mc_promisc(dev, vf);
+	else
+		return ixgbe_disable_vf_mc_promisc(dev, vf);
+}
+
+static int
 ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 {
 	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -641,6 +703,9 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
 		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
 		break;
+	case IXGBE_VF_UPDATE_XCAST_MODE:
+		retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
+		break;
 	default:
 		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;