get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
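A minimal client sketch for these operations, using Python with the requests
library (the endpoint URL is taken from this record; the token header, the
writable "state" field, and the format=json parameter are assumptions based on
typical Patchwork REST API usage, not confirmed by this page):

    import requests

    URL = "https://patches.dpdk.org/api/patches/21040/"
    TOKEN = "REPLACE_ME"  # hypothetical API token; writes require authentication

    # GET: show the patch as JSON
    resp = requests.get(URL, params={"format": "json"})
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partially update the patch ("state" assumed to be a writable field)
    resp = requests.patch(
        URL,
        headers={"Authorization": "Token " + TOKEN},
        json={"state": "accepted"},
    )
    resp.raise_for_status()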

GET /api/patches/21040/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 21040,
    "url": "https://patches.dpdk.org/api/patches/21040/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1488414008-162839-12-git-send-email-allain.legacy@windriver.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1488414008-162839-12-git-send-email-allain.legacy@windriver.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1488414008-162839-12-git-send-email-allain.legacy@windriver.com",
    "date": "2017-03-02T00:20:03",
    "name": "[dpdk-dev,v3,11/16] net/avp: packet receive functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2cdd2ab8b4e3e9e090e49c49d78a9cf5764eadd1",
    "submitter": {
        "id": 679,
        "url": "https://patches.dpdk.org/api/people/679/?format=api",
        "name": "Allain Legacy",
        "email": "allain.legacy@windriver.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1488414008-162839-12-git-send-email-allain.legacy@windriver.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/21040/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/21040/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id B42EDF973;\n\tThu,  2 Mar 2017 01:21:56 +0100 (CET)",
            "from mail5.wrs.com (mail5.windriver.com [192.103.53.11])\n\tby dpdk.org (Postfix) with ESMTP id 8BFD74CE4\n\tfor <dev@dpdk.org>; Thu,  2 Mar 2017 01:20:49 +0100 (CET)",
            "from ALA-HCA.corp.ad.wrs.com (ala-hca.corp.ad.wrs.com\n\t[147.11.189.40])\n\tby mail5.wrs.com (8.15.2/8.15.2) with ESMTPS id v220KbJo011291\n\t(version=TLSv1 cipher=AES128-SHA bits=128 verify=OK);\n\tWed, 1 Mar 2017 16:20:37 -0800",
            "from yow-cgts4-lx.wrs.com (128.224.145.137) by\n\tALA-HCA.corp.ad.wrs.com (147.11.189.50) with Microsoft SMTP Server\n\t(TLS) id 14.3.294.0; Wed, 1 Mar 2017 16:20:36 -0800"
        ],
        "From": "Allain Legacy <allain.legacy@windriver.com>",
        "To": "<ferruh.yigit@intel.com>",
        "CC": "<ian.jolliffe@windriver.com>, <jerin.jacob@caviumnetworks.com>,\n\t<stephen@networkplumber.org>, <thomas.monjalon@6wind.com>, <dev@dpdk.org>",
        "Date": "Wed, 1 Mar 2017 19:20:03 -0500",
        "Message-ID": "<1488414008-162839-12-git-send-email-allain.legacy@windriver.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1488414008-162839-1-git-send-email-allain.legacy@windriver.com>",
        "References": "<1488136143-116389-1-git-send-email-allain.legacy@windriver.com>\n\t<1488414008-162839-1-git-send-email-allain.legacy@windriver.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[128.224.145.137]",
        "Subject": "[dpdk-dev] [PATCH v3 11/16] net/avp: packet receive functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adds function required for receiving packets from the host application via\nAVP device queues.  Both the simple and scattered functions are supported.\n\nSigned-off-by: Allain Legacy <allain.legacy@windriver.com>\nSigned-off-by: Matt Peters <matt.peters@windriver.com>\n---\n drivers/net/avp/Makefile     |   1 +\n drivers/net/avp/avp_ethdev.c | 461 +++++++++++++++++++++++++++++++++++++++++++\n 2 files changed, 462 insertions(+)",
    "diff": "diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile\nindex 9cf0449..3013cd1 100644\n--- a/drivers/net/avp/Makefile\n+++ b/drivers/net/avp/Makefile\n@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp_ethdev.c\n \n # this lib depends upon:\n DEPDIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += lib/librte_eal lib/librte_ether\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += lib/librte_mempool lib/librte_mbuf\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c\nindex b0c5ae4..836d4e4 100644\n--- a/drivers/net/avp/avp_ethdev.c\n+++ b/drivers/net/avp/avp_ethdev.c\n@@ -86,11 +86,19 @@ static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t\t  unsigned int socket_id,\n \t\t\t\t  const struct rte_eth_txconf *tx_conf);\n \n+static uint16_t avp_recv_scattered_pkts(void *rx_queue,\n+\t\t\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\t\t\tuint16_t nb_pkts);\n+\n+static uint16_t avp_recv_pkts(void *rx_queue,\n+\t\t\t      struct rte_mbuf **rx_pkts,\n+\t\t\t      uint16_t nb_pkts);\n static void avp_dev_rx_queue_release(void *rxq);\n static void avp_dev_tx_queue_release(void *txq);\n #define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)\n \n \n+#define AVP_MAX_RX_BURST 64\n #define AVP_MAX_MAC_ADDRS 1\n #define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN\n \n@@ -328,6 +336,15 @@ struct avp_queue {\n \treturn ret == 0 ? request.result : ret;\n }\n \n+/* translate from host mbuf virtual address to guest virtual address */\n+static inline void *\n+avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)\n+{\n+\treturn RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,\n+\t\t\t\t       (uintptr_t)avp->host_mbuf_addr),\n+\t\t\t   (uintptr_t)avp->mbuf_addr);\n+}\n+\n /* translate from host physical address to guest virtual address */\n static void *\n avp_dev_translate_address(struct rte_eth_dev *eth_dev,\n@@ -904,6 +921,7 @@ struct avp_queue {\n \n \tpci_dev = AVP_DEV_TO_PCI(eth_dev);\n \teth_dev->dev_ops = &avp_eth_dev_ops;\n+\teth_dev->rx_pkt_burst = &avp_recv_pkts;\n \n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n \t\t/*\n@@ -912,6 +930,10 @@ struct avp_queue {\n \t\t * be mapped to the same virtual address so all pointers should\n \t\t * be valid.\n \t\t */\n+\t\tif (eth_dev->data->scattered_rx) {\n+\t\t\tPMD_DRV_LOG(NOTICE, \"AVP device configured for chained mbufs\\n\");\n+\t\t\teth_dev->rx_pkt_burst = avp_recv_scattered_pkts;\n+\t\t}\n \t\treturn 0;\n \t}\n \n@@ -993,6 +1015,38 @@ struct avp_queue {\n \n \n static int\n+avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,\n+\t\t\t struct avp_dev *avp)\n+{\n+\tunsigned int max_rx_pkt_len;\n+\n+\tmax_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\n+\tif ((max_rx_pkt_len > avp->guest_mbuf_size) ||\n+\t    (max_rx_pkt_len > avp->host_mbuf_size)) {\n+\t\t/*\n+\t\t * If the guest MTU is greater than either the host or guest\n+\t\t * buffers then chained mbufs have to be enabled in the TX\n+\t\t * direction.  
It is assumed that the application will not need\n+\t\t * to send packets larger than their max_rx_pkt_len (MRU).\n+\t\t */\n+\t\treturn 1;\n+\t}\n+\n+\tif ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||\n+\t    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {\n+\t\t/*\n+\t\t * If the host MRU is greater than its own mbuf size or the\n+\t\t * guest mbuf size then chained mbufs have to be enabled in the\n+\t\t * RX direction.\n+\t\t */\n+\t\treturn 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \t\t       uint16_t rx_queue_id,\n \t\t       uint16_t nb_rx_desc,\n@@ -1018,6 +1072,14 @@ struct avp_queue {\n \tavp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);\n \tavp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;\n \n+\tif (avp_dev_enable_scattered(eth_dev, avp)) {\n+\t\tif (!eth_dev->data->scattered_rx) {\n+\t\t\tPMD_DRV_LOG(NOTICE, \"AVP device configured for chained mbufs\\n\");\n+\t\t\teth_dev->data->scattered_rx = 1;\n+\t\t\teth_dev->rx_pkt_burst = avp_recv_scattered_pkts;\n+\t\t}\n+\t}\n+\n \tPMD_DRV_LOG(DEBUG, \"AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\\n\",\n \t\t    avp->max_rx_pkt_len,\n \t\t    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,\n@@ -1088,6 +1150,405 @@ struct avp_queue {\n \treturn 0;\n }\n \n+static inline int\n+_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)\n+{\n+\tuint16_t *_a = (uint16_t *)&a->addr_bytes[0];\n+\tuint16_t *_b = (uint16_t *)&b->addr_bytes[0];\n+\treturn (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);\n+}\n+\n+static inline int\n+_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)\n+{\n+\tstruct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);\n+\n+\tif (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {\n+\t\t/* allow all packets destined to our address */\n+\t\treturn 0;\n+\t}\n+\n+\tif (likely(is_broadcast_ether_addr(&eth->d_addr))) {\n+\t\t/* allow all broadcast packets */\n+\t\treturn 0;\n+\t}\n+\n+\tif (likely(is_multicast_ether_addr(&eth->d_addr))) {\n+\t\t/* allow all multicast packets */\n+\t\treturn 0;\n+\t}\n+\n+\tif (avp->flags & AVP_F_PROMISC) {\n+\t\t/* allow all packets when in promiscuous mode */\n+\t\treturn 0;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS\n+static inline void\n+__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)\n+{\n+\tstruct rte_avp_desc *first_buf;\n+\tstruct rte_avp_desc *pkt_buf;\n+\tunsigned int pkt_len;\n+\tunsigned int nb_segs;\n+\tvoid *pkt_data;\n+\tunsigned int i;\n+\n+\tfirst_buf = avp_dev_translate_buffer(avp, buf);\n+\n+\ti = 0;\n+\tpkt_len = 0;\n+\tnb_segs = first_buf->nb_segs;\n+\tdo {\n+\t\t/* Adjust pointers for guest addressing */\n+\t\tpkt_buf = avp_dev_translate_buffer(avp, buf);\n+\t\tif (pkt_buf == NULL)\n+\t\t\trte_panic(\"bad buffer: segment %u has an invalid address %p\\n\",\n+\t\t\t\t  i, buf);\n+\t\tpkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);\n+\t\tif (pkt_data == NULL)\n+\t\t\trte_panic(\"bad buffer: segment %u has a NULL data pointer\\n\",\n+\t\t\t\t  i);\n+\t\tif (pkt_buf->data_len == 0)\n+\t\t\trte_panic(\"bad buffer: segment %u has 0 data length\\n\",\n+\t\t\t\t  i);\n+\t\tpkt_len += pkt_buf->data_len;\n+\t\tnb_segs--;\n+\t\ti++;\n+\n+\t} while (nb_segs && (buf = pkt_buf->next) != NULL);\n+\n+\tif (nb_segs != 0)\n+\t\trte_panic(\"bad buffer: expected %u segments found %u\\n\",\n+\t\t\t  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));\n+\tif (pkt_len != first_buf->pkt_len)\n+\t\trte_panic(\"bad buffer: 
expected length %u found %u\\n\",\n+\t\t\t  first_buf->pkt_len, pkt_len);\n+}\n+\n+#define avp_dev_buffer_sanity_check(a, b) \\\n+\t__avp_dev_buffer_sanity_check((a), (b))\n+\n+#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */\n+\n+#define avp_dev_buffer_sanity_check(a, b) do {} while (0)\n+\n+#endif\n+\n+/*\n+ * Copy a host buffer chain to a set of mbufs.\tThis function assumes that\n+ * there exactly the required number of mbufs to copy all source bytes.\n+ */\n+static inline struct rte_mbuf *\n+avp_dev_copy_from_buffers(struct avp_dev *avp,\n+\t\t\t  struct rte_avp_desc *buf,\n+\t\t\t  struct rte_mbuf **mbufs,\n+\t\t\t  unsigned int count)\n+{\n+\tstruct rte_mbuf *m_previous = NULL;\n+\tstruct rte_avp_desc *pkt_buf;\n+\tunsigned int total_length = 0;\n+\tunsigned int copy_length;\n+\tunsigned int src_offset;\n+\tstruct rte_mbuf *m;\n+\tuint16_t ol_flags;\n+\tuint16_t vlan_tci;\n+\tvoid *pkt_data;\n+\tunsigned int i;\n+\n+\tavp_dev_buffer_sanity_check(avp, buf);\n+\n+\t/* setup the first source buffer */\n+\tpkt_buf = avp_dev_translate_buffer(avp, buf);\n+\tpkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);\n+\ttotal_length = pkt_buf->pkt_len;\n+\tsrc_offset = 0;\n+\n+\tif (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {\n+\t\tol_flags = PKT_RX_VLAN_PKT;\n+\t\tvlan_tci = pkt_buf->vlan_tci;\n+\t} else {\n+\t\tol_flags = 0;\n+\t\tvlan_tci = 0;\n+\t}\n+\n+\tfor (i = 0; (i < count) && (buf != NULL); i++) {\n+\t\t/* fill each destination buffer */\n+\t\tm = mbufs[i];\n+\n+\t\tif (m_previous != NULL)\n+\t\t\tm_previous->next = m;\n+\n+\t\tm_previous = m;\n+\n+\t\tdo {\n+\t\t\t/*\n+\t\t\t * Copy as many source buffers as will fit in the\n+\t\t\t * destination buffer.\n+\t\t\t */\n+\t\t\tcopy_length = RTE_MIN((avp->guest_mbuf_size -\n+\t\t\t\t\t       rte_pktmbuf_data_len(m)),\n+\t\t\t\t\t      (pkt_buf->data_len -\n+\t\t\t\t\t       src_offset));\n+\t\t\trte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),\n+\t\t\t\t\t       rte_pktmbuf_data_len(m)),\n+\t\t\t\t   RTE_PTR_ADD(pkt_data, src_offset),\n+\t\t\t\t   copy_length);\n+\t\t\trte_pktmbuf_data_len(m) += copy_length;\n+\t\t\tsrc_offset += copy_length;\n+\n+\t\t\tif (likely(src_offset == pkt_buf->data_len)) {\n+\t\t\t\t/* need a new source buffer */\n+\t\t\t\tbuf = pkt_buf->next;\n+\t\t\t\tif (buf != NULL) {\n+\t\t\t\t\tpkt_buf = avp_dev_translate_buffer(\n+\t\t\t\t\t\tavp, buf);\n+\t\t\t\t\tpkt_data = avp_dev_translate_buffer(\n+\t\t\t\t\t\tavp, pkt_buf->data);\n+\t\t\t\t\tsrc_offset = 0;\n+\t\t\t\t}\n+\t\t\t}\n+\n+\t\t\tif (unlikely(rte_pktmbuf_data_len(m) ==\n+\t\t\t\t     avp->guest_mbuf_size)) {\n+\t\t\t\t/* need a new destination mbuf */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t} while (buf != NULL);\n+\t}\n+\n+\tm = mbufs[0];\n+\tm->ol_flags = ol_flags;\n+\tm->nb_segs = count;\n+\trte_pktmbuf_pkt_len(m) = total_length;\n+\tm->vlan_tci = vlan_tci;\n+\n+\t__rte_mbuf_sanity_check(m, 1);\n+\n+\treturn m;\n+}\n+\n+static uint16_t\n+avp_recv_scattered_pkts(void *rx_queue,\n+\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct avp_queue *rxq = (struct avp_queue *)rx_queue;\n+\tstruct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];\n+\tstruct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];\n+\tstruct avp_dev *avp = rxq->avp;\n+\tstruct rte_avp_desc *pkt_buf;\n+\tstruct rte_avp_fifo *free_q;\n+\tstruct rte_avp_fifo *rx_q;\n+\tstruct rte_avp_desc *buf;\n+\tunsigned int count, avail, n;\n+\tunsigned int guest_mbuf_size;\n+\tstruct rte_mbuf *m;\n+\tunsigned int required;\n+\tunsigned int buf_len;\n+\tunsigned int port_id;\n+\tunsigned int 
i;\n+\n+\tif (unlikely(avp->flags & AVP_F_DETACHED)) {\n+\t\t/* VM live migration in progress */\n+\t\treturn 0;\n+\t}\n+\n+\tguest_mbuf_size = avp->guest_mbuf_size;\n+\tport_id = avp->port_id;\n+\trx_q = avp->rx_q[rxq->queue_id];\n+\tfree_q = avp->free_q[rxq->queue_id];\n+\n+\t/* setup next queue to service */\n+\trxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?\n+\t\t(rxq->queue_id + 1) : rxq->queue_base;\n+\n+\t/* determine how many slots are available in the free queue */\n+\tcount = avp_fifo_free_count(free_q);\n+\n+\t/* determine how many packets are available in the rx queue */\n+\tavail = avp_fifo_count(rx_q);\n+\n+\t/* determine how many packets can be received */\n+\tcount = RTE_MIN(count, avail);\n+\tcount = RTE_MIN(count, nb_pkts);\n+\tcount = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);\n+\n+\tif (unlikely(count == 0)) {\n+\t\t/* no free buffers, or no buffers on the rx queue */\n+\t\treturn 0;\n+\t}\n+\n+\t/* retrieve pending packets */\n+\tn = avp_fifo_get(rx_q, (void **)&avp_bufs, count);\n+\tPMD_RX_LOG(DEBUG, \"Receiving %u packets from Rx queue at %p\\n\",\n+\t\t   count, rx_q);\n+\n+\tcount = 0;\n+\tfor (i = 0; i < n; i++) {\n+\t\t/* prefetch next entry while processing current one */\n+\t\tif (i + 1 < n) {\n+\t\t\tpkt_buf = avp_dev_translate_buffer(avp,\n+\t\t\t\t\t\t\t   avp_bufs[i + 1]);\n+\t\t\trte_prefetch0(pkt_buf);\n+\t\t}\n+\t\tbuf = avp_bufs[i];\n+\n+\t\t/* Peek into the first buffer to determine the total length */\n+\t\tpkt_buf = avp_dev_translate_buffer(avp, buf);\n+\t\tbuf_len = pkt_buf->pkt_len;\n+\n+\t\t/* Allocate enough mbufs to receive the entire packet */\n+\t\trequired = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;\n+\t\tif (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {\n+\t\t\trxq->dev_data->rx_mbuf_alloc_failed++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* Copy the data from the buffers to our mbufs */\n+\t\tm = avp_dev_copy_from_buffers(avp, buf, mbufs, required);\n+\n+\t\t/* finalize mbuf */\n+\t\tm->port = port_id;\n+\n+\t\tif (_avp_mac_filter(avp, m) != 0) {\n+\t\t\t/* silently discard packets not destined to our MAC */\n+\t\t\trte_pktmbuf_free(m);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* return new mbuf to caller */\n+\t\trx_pkts[count++] = m;\n+\t\trxq->bytes += buf_len;\n+\t}\n+\n+\trxq->packets += count;\n+\n+\t/* return the buffers to the free queue */\n+\tavp_fifo_put(free_q, (void **)&avp_bufs[0], n);\n+\n+\treturn count;\n+}\n+\n+\n+static uint16_t\n+avp_recv_pkts(void *rx_queue,\n+\t      struct rte_mbuf **rx_pkts,\n+\t      uint16_t nb_pkts)\n+{\n+\tstruct avp_queue *rxq = (struct avp_queue *)rx_queue;\n+\tstruct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];\n+\tstruct avp_dev *avp = rxq->avp;\n+\tstruct rte_avp_desc *pkt_buf;\n+\tstruct rte_avp_fifo *free_q;\n+\tstruct rte_avp_fifo *rx_q;\n+\tunsigned int count, avail, n;\n+\tunsigned int pkt_len;\n+\tstruct rte_mbuf *m;\n+\tchar *pkt_data;\n+\tunsigned int i;\n+\n+\tif (unlikely(avp->flags & AVP_F_DETACHED)) {\n+\t\t/* VM live migration in progress */\n+\t\treturn 0;\n+\t}\n+\n+\trx_q = avp->rx_q[rxq->queue_id];\n+\tfree_q = avp->free_q[rxq->queue_id];\n+\n+\t/* setup next queue to service */\n+\trxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?\n+\t\t(rxq->queue_id + 1) : rxq->queue_base;\n+\n+\t/* determine how many slots are available in the free queue */\n+\tcount = avp_fifo_free_count(free_q);\n+\n+\t/* determine how many packets are available in the rx queue */\n+\tavail = avp_fifo_count(rx_q);\n+\n+\t/* determine how many packets can be received */\n+\tcount = 
RTE_MIN(count, avail);\n+\tcount = RTE_MIN(count, nb_pkts);\n+\tcount = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);\n+\n+\tif (unlikely(count == 0)) {\n+\t\t/* no free buffers, or no buffers on the rx queue */\n+\t\treturn 0;\n+\t}\n+\n+\t/* retrieve pending packets */\n+\tn = avp_fifo_get(rx_q, (void **)&avp_bufs, count);\n+\tPMD_RX_LOG(DEBUG, \"Receiving %u packets from Rx queue at %p\\n\",\n+\t\t   count, rx_q);\n+\n+\tcount = 0;\n+\tfor (i = 0; i < n; i++) {\n+\t\t/* prefetch next entry while processing current one */\n+\t\tif (i < n - 1) {\n+\t\t\tpkt_buf = avp_dev_translate_buffer(avp,\n+\t\t\t\t\t\t\t   avp_bufs[i + 1]);\n+\t\t\trte_prefetch0(pkt_buf);\n+\t\t}\n+\n+\t\t/* Adjust host pointers for guest addressing */\n+\t\tpkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);\n+\t\tpkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);\n+\t\tpkt_len = pkt_buf->pkt_len;\n+\n+\t\tif (unlikely((pkt_len > avp->guest_mbuf_size) ||\n+\t\t\t     (pkt_buf->nb_segs > 1))) {\n+\t\t\t/*\n+\t\t\t * application should be using the scattered receive\n+\t\t\t * function\n+\t\t\t */\n+\t\t\trxq->errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* process each packet to be transmitted */\n+\t\tm = rte_pktmbuf_alloc(avp->pool);\n+\t\tif (unlikely(m == NULL)) {\n+\t\t\trxq->dev_data->rx_mbuf_alloc_failed++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* copy data out of the host buffer to our buffer */\n+\t\tm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);\n+\n+\t\t/* initialize the local mbuf */\n+\t\trte_pktmbuf_data_len(m) = pkt_len;\n+\t\trte_pktmbuf_pkt_len(m) = pkt_len;\n+\t\tm->port = avp->port_id;\n+\n+\t\tif (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {\n+\t\t\tm->ol_flags = PKT_RX_VLAN_PKT;\n+\t\t\tm->vlan_tci = pkt_buf->vlan_tci;\n+\t\t}\n+\n+\t\tif (_avp_mac_filter(avp, m) != 0) {\n+\t\t\t/* silently discard packets not destined to our MAC */\n+\t\t\trte_pktmbuf_free(m);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* return new mbuf to caller */\n+\t\trx_pkts[count++] = m;\n+\t\trxq->bytes += pkt_len;\n+\t}\n+\n+\trxq->packets += count;\n+\n+\t/* return the buffers to the free queue */\n+\tavp_fifo_put(free_q, (void **)&avp_bufs[0], n);\n+\n+\treturn count;\n+}\n+\n static void\n avp_dev_rx_queue_release(void *rx_queue)\n {\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "11/16"
    ]
}