From patchwork Wed Aug 19 03:24:11 2020
X-Patchwork-Submitter: Marvin Liu
X-Patchwork-Id: 75697
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Marvin Liu
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu
Date: Wed, 19 Aug 2020 11:24:11 +0800
Message-Id: <20200819032414.51430-3-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200819032414.51430-1-yong.liu@intel.com>
References: <20200819032414.51430-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v1 2/5] vhost: reuse packed ring functions

Move the parse_ethernet, offload and extbuf functions to the vhost
header file. These functions will be reused by the vhost vectorized
path.
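
For example, once these helpers are exposed through vhost.h, a new
datapath file can consume them directly. A minimal sketch of such a
caller (demo_dequeue_one() and its arguments are illustrative only;
the actual vectorized path is introduced later in this series):

    #include "vhost.h"

    /* Illustrative consumer of the helpers moved into vhost.h:
     * allocate a suitable mbuf, then translate the virtio-net header
     * into mbuf offload flags.
     */
    static struct rte_mbuf *
    demo_dequeue_one(struct virtio_net *dev, struct rte_mempool *mp,
    		struct virtio_net_hdr *hdr, uint32_t pkt_len)
    {
    	struct rte_mbuf *pkt;

    	/* Falls back to an external buffer when tailroom is short
    	 * and the device allows it (dev->extbuf).
    	 */
    	pkt = virtio_dev_pktmbuf_alloc(dev, mp, pkt_len);
    	if (pkt == NULL)
    		return NULL;

    	/* ... copy payload from the descriptor chain (elided) ... */

    	/* Only parse headers when a host offload feature was
    	 * negotiated.
    	 */
    	if (virtio_net_with_host_offload(dev))
    		vhost_dequeue_offload(hdr, pkt);

    	return pkt;
    }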
Signed-off-by: Marvin Liu

---

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b556eb3bf6..5a5c945551 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -20,6 +20,10 @@
 #include
 #include
+#include
+#include
+#include
+#include
 
 #include "rte_vhost.h"
 #include "rte_vdpa.h"
 #include "rte_vdpa_dev.h"
@@ -905,4 +909,215 @@ put_zmbuf(struct zcopy_mbuf *zmbuf)
 	zmbuf->in_use = 0;
 }
 
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
+static __rte_always_inline void
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	void *l3_hdr = NULL;
+	struct rte_ether_hdr *eth_hdr;
+	uint16_t ethertype;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+	m->l2_len = sizeof(struct rte_ether_hdr);
+	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		struct rte_vlan_hdr *vlan_hdr =
+			(struct rte_vlan_hdr *)(eth_hdr + 1);
+
+		m->l2_len += sizeof(struct rte_vlan_hdr);
+		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	}
+
+	l3_hdr = (char *)eth_hdr + m->l2_len;
+
+	switch (ethertype) {
+	case RTE_ETHER_TYPE_IPV4:
+		ipv4_hdr = l3_hdr;
+		*l4_proto = ipv4_hdr->next_proto_id;
+		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV4;
+		break;
+	case RTE_ETHER_TYPE_IPV6:
+		ipv6_hdr = l3_hdr;
+		*l4_proto = ipv6_hdr->proto;
+		m->l3_len = sizeof(struct rte_ipv6_hdr);
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV6;
+		break;
+	default:
+		m->l3_len = 0;
+		*l4_proto = 0;
+		*l4_hdr = NULL;
+		break;
+	}
+}
+
+static __rte_always_inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+	if (dev->features &
+		((1ULL << VIRTIO_NET_F_CSUM) |
+		 (1ULL << VIRTIO_NET_F_HOST_ECN) |
+		 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+		 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+		 (1ULL << VIRTIO_NET_F_HOST_UFO)))
+		return true;
+
+	return false;
+}
+
+static __rte_always_inline void
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+{
+	uint16_t l4_proto = 0;
+	void *l4_hdr = NULL;
+	struct rte_tcp_hdr *tcp_hdr = NULL;
+
+	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+		return;
+
+	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
+			switch (hdr->csum_offset) {
+			case (offsetof(struct rte_tcp_hdr, cksum)):
+				if (l4_proto == IPPROTO_TCP)
+					m->ol_flags |= PKT_TX_TCP_CKSUM;
+				break;
+			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
+				if (l4_proto == IPPROTO_UDP)
+					m->ol_flags |= PKT_TX_UDP_CKSUM;
+				break;
+			case (offsetof(struct rte_sctp_hdr, cksum)):
+				if (l4_proto == IPPROTO_SCTP)
+					m->ol_flags |= PKT_TX_SCTP_CKSUM;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		case VIRTIO_NET_HDR_GSO_TCPV4:
+		case VIRTIO_NET_HDR_GSO_TCPV6:
+			tcp_hdr = l4_hdr;
+			m->ol_flags |= PKT_TX_TCP_SEG;
+			m->tso_segsz = hdr->gso_size;
+			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			break;
+		case VIRTIO_NET_HDR_GSO_UDP:
+			m->ol_flags |= PKT_TX_UDP_SEG;
+			m->tso_segsz = hdr->gso_size;
+			m->l4_len = sizeof(struct rte_udp_hdr);
+			break;
+		default:
+			VHOST_LOG_DATA(WARNING,
+				"unsupported gso type %u.\n", hdr->gso_type);
+			break;
+		}
+	}
+}
+
+static void
+virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
+{
+	rte_free(opaque);
+}
+
+static int
+virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+{
+	struct rte_mbuf_ext_shared_info *shinfo = NULL;
+	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
+	uint16_t buf_len;
+	rte_iova_t iova;
+	void *buf;
+
+	/* Try to use pkt buffer to store shinfo to reduce the amount of memory
+	 * required, otherwise store shinfo in the new buffer.
+	 */
+	if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
+		shinfo = rte_pktmbuf_mtod(pkt,
+				struct rte_mbuf_ext_shared_info *);
+	else {
+		total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+		total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
+	}
+
+	if (unlikely(total_len > UINT16_MAX))
+		return -ENOSPC;
+
+	buf_len = total_len;
+	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	/* Initialize shinfo */
+	if (shinfo) {
+		shinfo->free_cb = virtio_dev_extbuf_free;
+		shinfo->fcb_opaque = buf;
+		rte_mbuf_ext_refcnt_set(shinfo, 1);
+	} else {
+		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+					virtio_dev_extbuf_free, buf);
+		if (unlikely(shinfo == NULL)) {
+			rte_free(buf);
+			VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+			return -1;
+		}
+	}
+
+	iova = rte_malloc_virt2iova(buf);
+	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
+	rte_pktmbuf_reset_headroom(pkt);
+
+	return 0;
+}
+
+/*
+ * Allocate a host supported pktmbuf.
+ */
+static __rte_always_inline struct rte_mbuf *
+virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
+			 uint32_t data_len)
+{
+	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
+
+	if (unlikely(pkt == NULL)) {
+		VHOST_LOG_DATA(ERR,
+			"Failed to allocate memory for mbuf.\n");
+		return NULL;
+	}
+
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return pkt;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return pkt;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return pkt;
+
+	/* Data doesn't fit into the buffer and the host supports
+	 * only linear buffers
+	 */
+	rte_pktmbuf_free(pkt);
+
+	return NULL;
+}
+
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index bd9303c8a9..6107662685 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -32,12 +32,6 @@ rxvq_is_mergeable(struct virtio_net *dev)
 	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 }
 
-static __rte_always_inline bool
-virtio_net_is_inorder(struct virtio_net *dev)
-{
-	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
-}
-
 static bool
 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
@@ -1804,121 +1798,6 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
 	return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
 }
 
-static inline bool
-virtio_net_with_host_offload(struct virtio_net *dev)
-{
-	if (dev->features &
-		((1ULL << VIRTIO_NET_F_CSUM) |
-		 (1ULL << VIRTIO_NET_F_HOST_ECN) |
-		 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
-		 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
-		 (1ULL << VIRTIO_NET_F_HOST_UFO)))
-		return true;
-
-	return false;
-}
-
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
-{
-	struct rte_ipv4_hdr *ipv4_hdr;
-	struct rte_ipv6_hdr *ipv6_hdr;
-	void *l3_hdr = NULL;
-	struct rte_ether_hdr *eth_hdr;
-	uint16_t ethertype;
-
-	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-
-	m->l2_len = sizeof(struct rte_ether_hdr);
-	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
-
-	if (ethertype == RTE_ETHER_TYPE_VLAN) {
-		struct rte_vlan_hdr *vlan_hdr =
-			(struct rte_vlan_hdr *)(eth_hdr + 1);
-
-		m->l2_len += sizeof(struct rte_vlan_hdr);
-		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
-	}
-
-	l3_hdr = (char *)eth_hdr + m->l2_len;
-
-	switch (ethertype) {
-	case RTE_ETHER_TYPE_IPV4:
-		ipv4_hdr = l3_hdr;
-		*l4_proto = ipv4_hdr->next_proto_id;
-		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
-		m->ol_flags |= PKT_TX_IPV4;
-		break;
-	case RTE_ETHER_TYPE_IPV6:
-		ipv6_hdr = l3_hdr;
-		*l4_proto = ipv6_hdr->proto;
-		m->l3_len = sizeof(struct rte_ipv6_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
-		m->ol_flags |= PKT_TX_IPV6;
-		break;
-	default:
-		m->l3_len = 0;
-		*l4_proto = 0;
-		*l4_hdr = NULL;
-		break;
-	}
-}
-
-static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
-{
-	uint16_t l4_proto = 0;
-	void *l4_hdr = NULL;
-	struct rte_tcp_hdr *tcp_hdr = NULL;
-
-	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
-		return;
-
-	parse_ethernet(m, &l4_proto, &l4_hdr);
-	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
-			switch (hdr->csum_offset) {
-			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
-					m->ol_flags |= PKT_TX_TCP_CKSUM;
-				break;
-			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
-					m->ol_flags |= PKT_TX_UDP_CKSUM;
-				break;
-			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
-					m->ol_flags |= PKT_TX_SCTP_CKSUM;
-				break;
-			default:
-				break;
-			}
-		}
-	}
-
-	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
-		case VIRTIO_NET_HDR_GSO_TCPV4:
-		case VIRTIO_NET_HDR_GSO_TCPV6:
-			tcp_hdr = l4_hdr;
-			m->ol_flags |= PKT_TX_TCP_SEG;
-			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
-			break;
-		case VIRTIO_NET_HDR_GSO_UDP:
-			m->ol_flags |= PKT_TX_UDP_SEG;
-			m->tso_segsz = hdr->gso_size;
-			m->l4_len = sizeof(struct rte_udp_hdr);
-			break;
-		default:
-			VHOST_LOG_DATA(WARNING,
-				"unsupported gso type %u.\n", hdr->gso_type);
-			break;
-		}
-	}
-}
-
 static __rte_noinline void
 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
 		struct buf_vector *buf_vec)
@@ -2145,96 +2024,6 @@ get_zmbuf(struct vhost_virtqueue *vq)
 	return NULL;
 }
 
-static void
-virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
-{
-	rte_free(opaque);
-}
-
-static int
-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
-{
-	struct rte_mbuf_ext_shared_info *shinfo = NULL;
-	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
-	uint16_t buf_len;
-	rte_iova_t iova;
-	void *buf;
-
-	/* Try to use pkt buffer to store shinfo to reduce the amount of memory
-	 * required, otherwise store shinfo in the new buffer.
-	 */
-	if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
-		shinfo = rte_pktmbuf_mtod(pkt,
-				struct rte_mbuf_ext_shared_info *);
-	else {
-		total_len += sizeof(*shinfo) + sizeof(uintptr_t);
-		total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
-	}
-
-	if (unlikely(total_len > UINT16_MAX))
-		return -ENOSPC;
-
-	buf_len = total_len;
-	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
-	if (unlikely(buf == NULL))
-		return -ENOMEM;
-
-	/* Initialize shinfo */
-	if (shinfo) {
-		shinfo->free_cb = virtio_dev_extbuf_free;
-		shinfo->fcb_opaque = buf;
-		rte_mbuf_ext_refcnt_set(shinfo, 1);
-	} else {
-		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
-					virtio_dev_extbuf_free, buf);
-		if (unlikely(shinfo == NULL)) {
-			rte_free(buf);
-			VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
-			return -1;
-		}
-	}
-
-	iova = rte_malloc_virt2iova(buf);
-	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
-	rte_pktmbuf_reset_headroom(pkt);
-
-	return 0;
-}
-
-/*
- * Allocate a host supported pktmbuf.
- */
-static __rte_always_inline struct rte_mbuf *
-virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
-			 uint32_t data_len)
-{
-	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
-
-	if (unlikely(pkt == NULL)) {
-		VHOST_LOG_DATA(ERR,
-			"Failed to allocate memory for mbuf.\n");
-		return NULL;
-	}
-
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
-
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
-}
-
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
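
A note on the buffer sizing in virtio_dev_extbuf_alloc(): when the
shared info does not fit in the mbuf itself, space for it is reserved
at the tail of the external buffer, rounded up to pointer alignment,
and the whole allocation must still fit in the 16-bit buf_len. A
standalone sketch of that arithmetic (the DEMO_* names are
illustrative stand-ins, not DPDK API):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_HEADROOM	128	/* stands in for RTE_PKTMBUF_HEADROOM */
    #define DEMO_SHINFO_SZ	64	/* stands in for sizeof(*shinfo) */
    #define DEMO_ALIGN_CEIL(v, a)	(((v) + (a) - 1) / (a) * (a))

    int
    main(void)
    {
    	uint32_t size = 2048;	/* requested data length */
    	uint32_t total_len = DEMO_HEADROOM + size;

    	/* shinfo does not fit in the mbuf: append it to the external
    	 * buffer, keeping pointer alignment for the shinfo address.
    	 */
    	total_len += DEMO_SHINFO_SZ + sizeof(uintptr_t);
    	total_len = DEMO_ALIGN_CEIL(total_len, sizeof(uintptr_t));

    	printf("external buffer length: %" PRIu32 "\n", total_len);

    	/* Mirrors the -ENOSPC check: buf_len is only 16 bits wide. */
    	return total_len > UINT16_MAX;
    }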