From patchwork Fri Sep 12 06:33:53 2014
X-Patchwork-Submitter: Jijiang Liu
X-Patchwork-Id: 363
From: Jijiang Liu
To: dev@dpdk.org
Date: Fri, 12 Sep 2014 14:33:53 +0800
Message-Id: <1410503639-10753-3-git-send-email-jijiang.liu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1410503639-10753-1-git-send-email-jijiang.liu@intel.com>
References: <1410503639-10753-1-git-send-email-jijiang.liu@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/8] i40e: support VxLAN packet identification in librte_pmd_i40e

Support tunneling UDP port configuration on i40e in librte_pmd_i40e.
Currently only VxLAN is implemented, which includes:
 - VxLAN UDP port initialization
 - the APIs to configure a VxLAN UDP port in librte_pmd_i40e

Signed-off-by: Jijiang Liu
Acked-by: Helin Zhang
Acked-by: Jingjing Wu
Acked-by: Jing Chen
---
 config/common_linuxapp            |    5 +
 lib/librte_mbuf/rte_mbuf.h        |    2 +
 lib/librte_pmd_i40e/i40e_ethdev.c |  200 ++++++++++++++++++++++++++++++++++++-
 lib/librte_pmd_i40e/i40e_ethdev.h |    5 +
 lib/librte_pmd_i40e/i40e_rxtx.c   |   11 ++
 5 files changed, 222 insertions(+), 1 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 9047975..b5ecf15 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -212,6 +212,11 @@ CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1
 
 #
+# Compile tunneling UDP port support
+#
+CONFIG_RTE_LIBRTE_TUNNEL_UDP_PORT=4789
+
+#
 # Compile burst-oriented VIRTIO PMD driver
 #
 CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 2735f37..1832e73 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -594,6 +594,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 	m->pkt.in_port = 0xff;
 
 	m->ol_flags = 0;
+	m->reserved = 0;
 	buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
			RTE_PKTMBUF_HEADROOM : m->buf_len;
 	m->pkt.data = (char*) m->buf_addr + buf_ofs;
@@ -658,6 +659,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
 	mi->pkt.pkt_len = mi->pkt.data_len;
 	mi->pkt.nb_segs = 1;
 	mi->ol_flags = md->ol_flags;
+	mi->reserved = md->reserved;
 
 	__rte_mbuf_sanity_check(mi, RTE_MBUF_PKT, 1);
 	__rte_mbuf_sanity_check(md, RTE_MBUF_PKT, 0);
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 4e65ca4..4234073 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -189,7 +189,7 @@ static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
 static int i40e_veb_release(struct i40e_veb *veb);
 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
-						struct i40e_vsi *vsi);
+					struct i40e_vsi *vsi);
 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
@@ -205,6 +205,14 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
 				struct rte_eth_rss_conf *rss_conf);
 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 				struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel,
+				uint8_t count);
+static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel,
+				uint8_t count);
+static int i40e_pf_config_vxlan(struct i40e_pf *pf);
+
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
@@ -256,6 +264,8 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
 	.reta_query = i40e_dev_rss_reta_query,
 	.rss_hash_update = i40e_dev_rss_hash_update,
 	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+	.udp_tunnel_add = i40e_dev_udp_tunnel_add,
+	.udp_tunnel_del = i40e_dev_udp_tunnel_del,
 };
 
 static struct eth_driver rte_i40e_pmd = {
@@ -2529,6 +2539,34 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	return 0;
 }
 
+static int
+i40e_vxlan_filters_init(struct i40e_pf *pf)
+{
+	uint8_t filter_index;
+	int ret = 0;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	if (!(pf->flags & I40E_FLAG_VXLAN))
+		return 0;
+
+	/* Init first entry in tunneling UDP table */
+	ret = i40e_aq_add_udp_tunnel(hw, RTE_LIBRTE_TUNNEL_UDP_PORT,
+				I40E_AQC_TUNNEL_TYPE_VXLAN,
+				&filter_index, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add UDP tunnel port %d "
+			"with index=%d\n", RTE_LIBRTE_TUNNEL_UDP_PORT,
+			filter_index);
+	} else {
+		pf->vxlan_bitmap |= 1;
+		pf->vxlan_ports[0] = RTE_LIBRTE_TUNNEL_UDP_PORT;
+		PMD_DRV_LOG(INFO, "Added UDP tunnel port %d with "
+			"index=%d\n", RTE_LIBRTE_TUNNEL_UDP_PORT, filter_index);
+	}
+
+	return ret;
+}
+
 /* Setup a VSI */
 struct i40e_vsi *
 i40e_vsi_setup(struct i40e_pf *pf,
@@ -3160,6 +3198,12 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 	uint16_t i;
 
 	i40e_pf_config_mq_rx(pf);
+
+	if (data->dev_conf.tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+		pf->flags |= I40E_FLAG_VXLAN;
+		i40e_pf_config_vxlan(pf);
+	}
+
 	for (i = 0; i < data->nb_rx_queues; i++) {
 		ret = i40e_rx_queue_init(data->rx_queues[i]);
 		if (ret != I40E_SUCCESS) {
@@ -4076,6 +4120,150 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+	uint8_t i;
+
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->vxlan_ports[i] == port)
+			return i;
+	}
+
+	return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+	int idx, ret;
+	uint8_t filter_idx;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	if (!(pf->flags & I40E_FLAG_VXLAN)) {
+		PMD_DRV_LOG(ERR, "VxLAN tunneling mode is not configured\n");
+		return -EINVAL;
+	}
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	/* Check if port already exists */
+	if (idx >= 0) {
+		PMD_DRV_LOG(ERR, "Port %d already offloaded\n", port);
+		return -1;
+	}
+
+	/* Now check if there is space to add the new port */
+	idx = i40e_get_vxlan_port_idx(pf, 0);
+	if (idx < 0) {
+		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
+			"not adding port %d\n", port);
+		return -ENOSPC;
+	}
+
+	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+				&filter_idx, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add VxLAN UDP port %d\n", port);
+		return -1;
+	}
+
+	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d\n",
+		port, filter_idx);
+
+	/* New port: add it and mark its index in the bitmap */
+	pf->vxlan_ports[idx] = port;
+	pf->vxlan_bitmap |= (1 << idx);
+
+	return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+	int idx;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	if (idx < 0) {
+		PMD_DRV_LOG(ERR, "Port %d doesn't exist\n", port);
+		return -1;
+	}
+
+	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete VxLAN UDP port %d\n", port);
+		return -1;
+	}
+
+	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d\n",
+		port, idx);
+
+	pf->vxlan_ports[idx] = 0;
+	pf->vxlan_bitmap &= ~(1 << idx);
+
+	return 0;
+}
+
+/* Configure UDP tunneling ports */
+static int
+i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+	uint16_t i;
+	int ret = 0;
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	for (i = 0; i < count; i++, udp_tunnel++) {
+		switch (udp_tunnel->prot_type) {
+		case RTE_TUNNEL_TYPE_VXLAN:
+			ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+			break;
+
+		case RTE_TUNNEL_TYPE_GENEVE:
+		case RTE_TUNNEL_TYPE_TEREDO:
+			PMD_DRV_LOG(ERR, "Tunnel type is not supported now.\n");
+			ret = -1;
+			break;
+
+		default:
+			PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+	uint16_t i;
+	int ret = 0;
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	for (i = 0; i < count; i++, udp_tunnel++) {
+		switch (udp_tunnel->prot_type) {
+		case RTE_TUNNEL_TYPE_VXLAN:
+			ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+			break;
+		case RTE_TUNNEL_TYPE_GENEVE:
+		case RTE_TUNNEL_TYPE_TEREDO:
+			PMD_DRV_LOG(ERR, "Tunnel type is not supported now.\n");
+			ret = -1;
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /* Configure RSS */
 static int
 i40e_pf_config_rss(struct i40e_pf *pf)
@@ -4112,6 +4300,16 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	return i40e_hw_rss_hash_set(hw, &rss_conf);
 }
 
+/* Configure VxLAN */
+static int
+i40e_pf_config_vxlan(struct i40e_pf *pf)
+{
+	if (pf->flags & I40E_FLAG_VXLAN)
+		i40e_vxlan_filters_init(pf);
+
+	return 0;
+}
+
 static int
 i40e_pf_config_mq_rx(struct i40e_pf *pf)
 {
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h
index 64deef2..22d0628 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.h
+++ b/lib/librte_pmd_i40e/i40e_ethdev.h
@@ -60,6 +60,7 @@
 #define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
 #define I40E_FLAG_HEADER_SPLIT_ENABLED  (1ULL << 5)
 #define I40E_FLAG_FDIR                  (1ULL << 6)
+#define I40E_FLAG_VXLAN                 (1ULL << 7)
 #define I40E_FLAG_ALL (I40E_FLAG_RSS | \
 		       I40E_FLAG_DCB | \
 		       I40E_FLAG_VMDQ | \
@@ -216,6 +217,10 @@ struct i40e_pf {
 	uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
 	uint16_t vf_nb_qps; /* The number of queue pairs of VF */
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+
+	/* store VxLAN UDP ports */
+	uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+	uint16_t vxlan_bitmap; /* VxLAN bit mask */
 };
 
 enum pending_msg {
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index f153844..a1dce74 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -611,6 +611,12 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 					I40E_RXD_QW1_STATUS_SHIFT;
 			pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+			/* reserved is used to store packet type for RX side */
+			mb->reserved = (uint8_t)((qword1 &
+				I40E_RXD_QW1_PTYPE_MASK) >>
+				I40E_RXD_QW1_PTYPE_SHIFT);
+
 			mb->pkt.data_len = pkt_len;
 			mb->pkt.pkt_len = pkt_len;
 			mb->pkt.vlan_macip.f.vlan_tci = rx_status &
@@ -857,6 +863,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+		rxm->reserved = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+				I40E_RXD_QW1_PTYPE_SHIFT);
 		rxm->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->pkt.hash.rss =
@@ -1010,6 +1018,9 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+		first_seg->reserved = (uint8_t)((qword1 &
+			I40E_RXD_QW1_PTYPE_MASK) >>
+			I40E_RXD_QW1_PTYPE_SHIFT);
 		first_seg->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->pkt.hash.rss =
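
Note for reviewers (not part of the patch): below is a minimal sketch of how an application could exercise the new code path. It assumes the generic ethdev wrapper introduced elsewhere in this series is named rte_eth_dev_udp_tunnel_add() and takes (port_id, tunnel, count); the struct rte_eth_udp_tunnel fields and RTE_TUNNEL_TYPE_VXLAN are the ones this patch dispatches on in i40e_dev_udp_tunnel_add(). The helper name app_enable_vxlan_port() is hypothetical.

	#include <rte_ethdev.h>

	/* Sketch only: register one VxLAN UDP port on an i40e port.
	 * Assumes the ethdev API from the companion patch is
	 * rte_eth_dev_udp_tunnel_add(port_id, tunnel, count); adjust the
	 * call if the final API name or signature differs. */
	static int
	app_enable_vxlan_port(uint8_t port_id, uint16_t udp_port)
	{
		struct rte_eth_udp_tunnel tunnel = {
			.udp_port = udp_port,  /* e.g. 4789, the default in common_linuxapp */
			.prot_type = RTE_TUNNEL_TYPE_VXLAN,
		};

		/* The i40e PMD serves this through .udp_tunnel_add ->
		 * i40e_dev_udp_tunnel_add() -> i40e_add_vxlan_port(). */
		return rte_eth_dev_udp_tunnel_add(port_id, &tunnel, 1);
	}

The call only succeeds if I40E_FLAG_VXLAN was set at configure time (dev_conf.tunnel_type == RTE_TUNNEL_TYPE_VXLAN, see the i40e_vsi_rx_init() hunk above); otherwise i40e_add_vxlan_port() returns -EINVAL.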