get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
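
As a quick reference, the sketch below retrieves this resource as JSON using nothing but the Python standard library. It is a minimal example, not part of the API itself: the patch ID 37229 and the endpoint path are taken from the request shown below, the "Accept: application/json" header matches the Content-Type of the response, and the printed fields ("name", "state", "check") appear in the body. Updating a patch via PUT or PATCH uses the same URL but additionally requires authentication (for example a maintainer API token), which is omitted here.

import json
import urllib.request

# Fetch the patch shown below and decode the JSON body.
url = "https://patches.dpdk.org/api/patches/37229/"
req = urllib.request.Request(url, headers={"Accept": "application/json"})
with urllib.request.urlopen(req) as resp:
    patch = json.load(resp)

# A few fields from the response body below.
print(patch["name"])    # patch subject
print(patch["state"])   # e.g. "changes-requested"
print(patch["check"])   # aggregate check result, "fail" for this patch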

GET /api/patches/37229/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 37229,
    "url": "https://patches.dpdk.org/api/patches/37229/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com",
    "date": "2018-04-05T06:39:42",
    "name": "[dpdk-dev,v4,10/17] net/axgbe: add transmit and receive data path apis",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "feb2b4eebd35b53774c886705d41d9fa7c254319",
    "submitter": {
        "id": 819,
        "url": "https://patches.dpdk.org/api/people/819/?format=api",
        "name": "Kumar, Ravi1",
        "email": "ravi1.kumar@amd.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/37229/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/37229/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 3CBA91C991;\n\tThu,  5 Apr 2018 08:40:43 +0200 (CEST)",
            "from NAM02-SN1-obe.outbound.protection.outlook.com\n\t(mail-sn1nam02on0042.outbound.protection.outlook.com [104.47.36.42])\n\tby dpdk.org (Postfix) with ESMTP id E78461C95A\n\tfor <dev@dpdk.org>; Thu,  5 Apr 2018 08:40:26 +0200 (CEST)",
            "from wallaby-smavila.amd.com (202.56.249.162) by\n\tBN6PR12MB1505.namprd12.prod.outlook.com (2603:10b6:405:11::18) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.653.12; Thu, 5 Apr 2018 06:40:25 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=amdcloud.onmicrosoft.com; s=selector1-amd-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=ofS7fZZX7pYVamaQ7ucuoR720q3v1V1Vhiu14sL+e5M=;\n\tb=bzg3QbThVIWcIakoSGBxGboVx4MVdLmOf6vLufQsiUGppmticWzcfgyp52qR14HqpgjKovw1YSGA2UYe3fx2CeSyO0uvtA1ykKfOzZLJa5KKgs3bBwBLYAiCnJCOzUHuRcr1BK+JGFbPHlNRtNx7SaPd+twMSZWn6zJ8pXq7+Zw=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Ravi1.Kumar@amd.com; ",
        "From": "Ravi Kumar <Ravi1.kumar@amd.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com",
        "Date": "Thu,  5 Apr 2018 02:39:42 -0400",
        "Message-Id": "<1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1522910389-35530-1-git-send-email-Ravi1.kumar@amd.com>",
        "References": "<1520584954-130575-1-git-send-email-Ravi1.kumar@amd.com>\n\t<1522910389-35530-1-git-send-email-Ravi1.kumar@amd.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[202.56.249.162]",
        "X-ClientProxiedBy": "BMXPR01CA0040.INDPRD01.PROD.OUTLOOK.COM\n\t(2603:1096:b00:c::26) To BN6PR12MB1505.namprd12.prod.outlook.com\n\t(2603:10b6:405:11::18)",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "6ccc438c-32a5-4056-5dc8-08d59ac01d56",
        "X-MS-Office365-Filtering-HT": "Tenant",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(7020095)(4652020)(5600026)(4604075)(48565401081)(4534165)(4627221)(201703031133081)(201702281549075)(2017052603328)(7153060)(7193020);\n\tSRVR:BN6PR12MB1505; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BN6PR12MB1505;\n\t3:CsZ7wwiJL9nDULXDrB8QJPp7yOJ1ksz/uRG3LEgalV1az0ICTQfXVW//aY3pWbqhk8rTr+EMi80nf4TYVzzT2Ev1oaankuF9YaVeeECfyq4vA5G1/QUgXyPtwW5tj23qF4oUiVy5X6TA+cLsEKNLJbIaMMZx9DshgCk6VLpDQOqvENOXZZae4dDZ+H05IAxVYzULpQMnlnoFfvMePr/3O+HykAUjPaDt6LRE7P2jdh/3/D80tGQUS3mr/mao+mnd;\n\t25:kIoqlkgcsNar9f4BItbwTP4E9C/YGKTvfpDiPLt1ipf9Y4Jxmn4vJ9xz7OV5BsmjMkmdzO+7Ct+hcTR/35uNOndAzWeQqVVuTXjx01TuzzgOxrQkslD3lbYvst6H3UqLqlU325OeXZXwHdrWn7rNdOS+Ko/HxwuCUsJKNeSXWWunfOR5Ox6nMhhjCnkc1ha+CDQ+3tREa1mAcmT86mhnrL0gDtFzc8BIIk+sU938Fm3Q4RS35+GDmx1c3hiGzuJFoLSCpPEobhswnSsgWqORpZ1FT9PsutU4lCspBIoaBzKv106/jb6iwTYlycerjHEkT2QSC4NDNUQOfHdm0adUVw==;\n\t31:kuo3VP3Fwhv/skmPwTH/Mtwsqklsb536yLvYyjWQwAlqdTE6FNyGbeWRIWnPi/w6ljIKx2UmdUcRzlYDkX6sIBLgqi6xjkWOfOCV7+v++EdnJBTNFceZx9Uw+y6+OPj9AkHykfTNW5n9jQZ21ou9Fe7AbxmmojLHlW+p+AIuNFdTStRSJJwpBhHLGuECXSq5ynCCurbi3kuWzWvu4y7AwNTFcL5aVMCiuhMaeIrN4CQ=",
            "1; BN6PR12MB1505;\n\t20:3KYrZXol4njW18f+q/HyQqFN78ZF7TorQdLeA6+Cc10jIg6tfdw93g2KwUggtlbromJydOAZOnJvDaVA+ezy5E1h0rj+zqB69PetEs3t6omFRT8PLJkUyjzt6cS5D2LGp/xkzuFcWQ8PfTLX6lwhnpLssIAjRi2rrCrTrSGNMuKa+X993p5ldZV45GpM98e7AR8upWLJH90yeWAQEs2W7K2DxA/F9+w/yppMcfzO7RaSDIawj/6SnDx8Fx1wKqpFXyD5bCn8PPjyOD82CKDX7qTnRrJsPHgZxZUt4MetrrUtn10sfo9wh6lWRtipuR1+U9bZOHilvqbWStanbc9laL+z6h6Dko7NPsXf7YbRywtmRgOAPSgOEW2WrQN0YQdASN0Aq3ih+qS1AlqXvtgvvLlIQpjw/x9vhAlaf0CUh6ZNWyuVFLGDr/5TETxUEPE8xCST5rAl8kW3a7eeOGQZNNtTWZ8hVozYr4F/GTLgf4ZR1r/01gYXGwRkrUoz917y;\n\t4:dy6Y02a27gY4wIRfBHnlVjfUD4VxHvYGlhTm+swxT5znfzcW/0B+6JEzzhXX6sW6aoQ5dNcMC80Kq1iWBlTN7yktiSiDyChS3T+XoErkaynsk6oXSpCdhBxLYG8XZ8FbfUUndLjjOk2b2ZyoIZ3P1mElh9cjHBMkVhmmy7ZPPlbcCO/Wo2irW4L0n7ia7t2c/de5zYYSxgPrppYQkE2vp+avCpU6bmmCejnCQxSuGKnM+6Fw7rpQ9N/bvlm4fKBYC80PkP2mVpNRd4E2nvtcRueMnKsPAhlnlLZ8Ia+dDP/tfBslC76sLMtfu1RsBHVwcmKUUDesIaQBCAvxfHaeanDWQ/Hw6v9CGsyIlkCDxfY=",
            "=?us-ascii?Q?1; BN6PR12MB1505;\n\t23:PKW9jtPIJlc0LMB72RNWvVVP2WKcg/MqGthVz1obT?=\n\t/E6n0cMg95G7LJedlwTzU8nWR3wfsxZr5psm05tlxbCdPsD4ZgEN/VsHGkAXlvCp40EyJWef7z43xmoo/3wy2+YlQTIZABtcs0uXKyqXW+Xyk7HwZDqMA0GQPPgqErgxH773h14oPMf8cGNNVDE6Mfrd7z4MZ25537VsXXYSYUG5au1JxDi1tU7zZKIwylhHnB+lZDjgluiUuZa99wiQvC/nZTJCOd5z/Sb80HjQFfNIpo5YSo2WfT16XJ2fqsI7t65k7JQad9j87pc6lp9skQuatQu0vQorb5oAk4sAZcRkUVOulbb1mET8WMzIn1H5/BH7gnOlUwcxjjEwWvFUBYfBwwbWEfCNtuGItUUKZ8mHAfg56pu/cwnnUrFQ6OwlIG21OKQGFw7ag81D82eIRxAu3fHPRYO1lxNg3asFKKCqtTzCxuTqcXkQwkmW/C5N3Zmzpq9WpgzPZGEJpoCbp/d32ZQO9/pKzPzS7zAn/3tPEZBAUk5AAgF8Wcst8t7YsP+cKdFr/unU7WAvN9iPVlUbJXJIlBFVS8sE19+xyazymeB9hHqyGHygh7Lk2xj5gJVOPkDuhJ1SqYtWgZlQa9bpQdQ7Z14beFtq8h3QO2BHXtFpn5IEZ3wItnJqsKWFlyNF37zrT3wJBGrFseo2kehHrd7tqwDuTPYh4E5EkBopdPOprXH26vmp3KMPBq9C5izMyQqGLBUiR0Owphis29fzoHJOwDNk1OBpnDlUIFAFD5zeDqxAfY0k5O4I4lKODWeaMGdcNmveMiKZFMPUICK8SVsGbmb0BsJ1Y5XqK2mnURGq45J5qFwFYEL/7x/PEA21uZimzoYfqGG7z7+ecIFGJ688CAjJTshChUPUqzlq2o96CVUa9Sy6uhI9oswT5X+ybkaoyfsHantkNIJjAa75EgK4a3jWdVWU2fVLPDZT7UeMhskNqj4Sq2K3jxUoATsS3rA64SQLsZNRDkO+K2KPE/OpEFNggTo7vQXAXIYW7wruPMC+GXeWVkMhRWlu/6HuswZyMQpPBAwj+IRvSwDll8knq4P5tHIoDzDL1KR4JmlEA2WOWvPwIK9Xmp2rvJKnqX0KNc8OvyoCBYDXlddioTHmNTu6FFzekc7o+AyAUorm2MYHrbjEqWWY+SnUJmGXMrL1MaKK9uRJJNBRs24itP6yKgoYJvB+95DtpIFjCS00GqolSVeCXCalM9fctOH1f8sCKM1Lhlnq52dpa0FXjB2YMVlJ3dTJ8a7lERJmA==",
            "1; BN6PR12MB1505;\n\t6:xNsLE+sfH2eXcOfy7PWZzoDSmb76CWAsabSZklI93ZroIlgwLtU651blp3c+PeKoa0dZ0AYnH/RQKZd2bkV6J7iBX3dfFpQI5oYPRQmCs38czxmKhYF6YmhkF5D2WsX7D54jlJ4uI/6EHZCG6qqZD6TMdPQVX7nZP/Bg2iC/JE3ZmOeJvQ3S5ceGNbPAexrxKjElftZuYKWEiiuDUv6+pcRk8kctc4kX1hRg6TjA25WwLrA/RqQT7HJPmyJcCfkgLtRIEHWeojOymzwO8hhwXkUwr2CmWhNec/kEzIk9CUftV22S0BMJMN4tOk4EkoU8j4/OVbWbvNchaew3mzNCi6XzDVCkWCLM3ltd/jv+HB0TzbW1bOd2Qi/Y91kj+HMyuw5llRU+iuor5CK8pbkJxguUSoyypml8GMFLmhArbcOvMBH3EAV/DPaADvw7Lm8EFDufJy6IkjT5OWO2Uw5ybg==;\n\t5:dMExrPkx4c33l8mIrAksU7lRHHmyTYAv12TI/oMVLxcSoIJn45ZpxanZfQXes9DcmkQsJaOV8hxDGKCzBh7BOjNDE7RrF+1W3W1kbsXi0budJLIN/0cZXQTzWT3DQB3CzPNG9fVUMJ6DHoZ6z71P+dv4JA8wP+fLyF+InYMZuMs=;\n\t24:S2MrVcPySH/6thdqZALoPInCu4yoZls7MNjvCWcrK+nEUp1S4f83uR7dMpkBHUnnbvBQoJU4mCa6SOEOP4D1eL3Un6boZNUbh+gKL/qVNHs=",
            "1; BN6PR12MB1505;\n\t7:jLQEvZCxRLpedVTWudeBDThtTEpZE9+0ysW5p9Tzl4+abm9SdREanEJ+KyABfHBQRTQ1Rd8FU0BR2oVmuuWH01C3LG9K+6vetGSuFwlcxchkM2rJmp+b9H9P01naHojzt6lnmYpeDxgs/EOOJNMorW9a7ycP9bLG2wWQ+BtNRM2104vJNIDVt8qczww3YgbyPuFtUHl1vE3Ou6KWyDnlX44+aiG6KG1RKrU3PzdTZhG2NL8y9WNMynpw+rk5o/H6;\n\t20:qoq8X8o0+JSBOe9974VBg7lGYdCdN4jhrag4gT8K7SAxEVbXHnwgyD8t0z0sM06HjirF90+xyGP+6yR1D7g5E4w+SMazkbrRm7bkPB7n2/A32tyavc0iOX4WDYjtS1IAT0pNrmtc8hSTt526saDrdrIPRfipvIh9heT0hd+UH1kLpLqzQ7vwmNt+5xAmJNRsJg0YU7HlIrS3AmNoTlsCrBlcg2/EFk10LNoYxIQUn5LOyajDZctGYTKY8HdCZAvJ"
        ],
        "X-MS-TrafficTypeDiagnostic": "BN6PR12MB1505:",
        "X-Microsoft-Antispam-PRVS": "<BN6PR12MB15050EACFCD0EC69CB0BC065AEBB0@BN6PR12MB1505.namprd12.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:(60795455431006)(767451399110);",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(8211001083)(6040522)(2401047)(8121501046)(5005006)(3002001)(3231221)(944501327)(52105095)(93006095)(93001095)(10201501046)(6055026)(6041310)(20161123564045)(20161123562045)(20161123558120)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123560045)(6072148)(201708071742011);\n\tSRVR:BN6PR12MB1505; BCL:0; PCL:0; RULEID:; SRVR:BN6PR12MB1505; ",
        "X-Forefront-PRVS": "06339BAE63",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(366004)(346002)(376002)(396003)(39860400002)(39380400002)(199004)(189003)(3846002)(476003)(2616005)(316002)(446003)(956004)(305945005)(7736002)(478600001)(16586007)(16526019)(6486002)(186003)(76176011)(5660300001)(59450400001)(97736004)(2906002)(6116002)(486006)(386003)(26005)(50466002)(11346002)(25786009)(575784001)(8936002)(86362001)(50226002)(66066001)(47776003)(48376002)(2351001)(6666003)(2361001)(6916009)(4326008)(81166006)(36756003)(53416004)(51416003)(68736007)(7696005)(8676002)(106356001)(105586002)(52116002)(81156014)(53936002)(72206003);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BN6PR12MB1505;\n\tH:wallaby-smavila.amd.com; FPR:; \n\tSPF:None; LANG:en; PTR:InfoNoRecords; A:1; MX:1; ",
        "Received-SPF": "None (protection.outlook.com: amd.com does not designate\n\tpermitted sender hosts)",
        "X-Microsoft-Antispam-Message-Info": "m3eYNrNQRh2rf/1cXkRCsFQCHDGFIrE5aRz4s5rDReqGR7JPxJ6k8wYKZGDnAP2r1AK8WyKfERSoPi6jZk2fDK9zSJ7q06qFJhVxyE5i+k2KsYk7j0ShdsYh3oR59RCbuU7HhD/dQTiXotpUAGlQFacFe/BlAFWF0ohei6qWnFcw3cyWloOV434BEhWil44X",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "amd.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "05 Apr 2018 06:40:25.0343\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "6ccc438c-32a5-4056-5dc8-08d59ac01d56",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "3dd8961f-e488-4e60-8e11-a82d994e183d",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR12MB1505",
        "Subject": "[dpdk-dev] [PATCH v4 10/17] net/axgbe: add transmit and receive\n\tdata path apis",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Supported scalar implementation for RX data path\nSupported scalar and vector implementation for TX data path\n\nSigned-off-by: Ravi Kumar <Ravi1.kumar@amd.com>\n---\n drivers/net/axgbe/Makefile             |   1 +\n drivers/net/axgbe/axgbe_ethdev.c       |  22 +-\n drivers/net/axgbe/axgbe_rxtx.c         | 429 +++++++++++++++++++++++++++++++++\n drivers/net/axgbe/axgbe_rxtx.h         |  19 ++\n drivers/net/axgbe/axgbe_rxtx_vec_sse.c |  93 +++++++\n 5 files changed, 563 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/axgbe/axgbe_rxtx_vec_sse.c",
    "diff": "diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile\nindex 9fd7b5e..aff7917 100644\n--- a/drivers/net/axgbe/Makefile\n+++ b/drivers/net/axgbe/Makefile\n@@ -24,5 +24,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c\n SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c\n SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c\n SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c\n+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c\nindex f8cfbd8..a293058 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.c\n+++ b/drivers/net/axgbe/axgbe_ethdev.c\n@@ -102,9 +102,22 @@ axgbe_dev_interrupt_handler(void *param)\n {\n \tstruct rte_eth_dev *dev = (struct rte_eth_dev *)param;\n \tstruct axgbe_port *pdata = dev->data->dev_private;\n+\tunsigned int dma_isr, dma_ch_isr;\n \n \tpdata->phy_if.an_isr(pdata);\n-\n+\t/*DMA related interrupts*/\n+\tdma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);\n+\tif (dma_isr) {\n+\t\tif (dma_isr & 1) {\n+\t\t\tdma_ch_isr =\n+\t\t\t\tAXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)\n+\t\t\t\t\t\t  pdata->rx_queues[0],\n+\t\t\t\t\t\t  DMA_CH_SR);\n+\t\t\tAXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)\n+\t\t\t\t\t   pdata->rx_queues[0],\n+\t\t\t\t\t   DMA_CH_SR, dma_ch_isr);\n+\t\t}\n+\t}\n \t/* Enable interrupts since disabled after generation*/\n \trte_intr_enable(&pdata->pci_dev->intr_handle);\n }\n@@ -166,6 +179,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)\n \n \t/* phy start*/\n \tpdata->phy_if.phy_start(pdata);\n+\taxgbe_dev_enable_tx(dev);\n+\taxgbe_dev_enable_rx(dev);\n \n \taxgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);\n \taxgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);\n@@ -185,6 +200,8 @@ axgbe_dev_stop(struct rte_eth_dev *dev)\n \t\treturn;\n \n \taxgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);\n+\taxgbe_dev_disable_tx(dev);\n+\taxgbe_dev_disable_rx(dev);\n \n \tpdata->phy_if.phy_stop(pdata);\n \tpdata->hw_if.exit(pdata);\n@@ -423,6 +440,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)\n \tint ret;\n \n \teth_dev->dev_ops = &axgbe_eth_dev_ops;\n+\teth_dev->rx_pkt_burst = &axgbe_recv_pkts;\n \n \t/*\n \t * For secondary processes, we don't initialise any further as primary\n@@ -573,6 +591,8 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)\n \trte_free(eth_dev->data->mac_addrs);\n \teth_dev->data->mac_addrs = NULL;\n \teth_dev->dev_ops = NULL;\n+\teth_dev->rx_pkt_burst = NULL;\n+\teth_dev->tx_pkt_burst = NULL;\n \taxgbe_dev_clear_queues(eth_dev);\n \n \t/* disable uio intr before callback unregister */\ndiff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c\nindex 1dff7c8..cdc428c 100644\n--- a/drivers/net/axgbe/axgbe_rxtx.c\n+++ b/drivers/net/axgbe/axgbe_rxtx.c\n@@ -113,6 +113,197 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \treturn 0;\n }\n \n+static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,\n+\t\t\t\t  unsigned int queue)\n+{\n+\tunsigned int rx_status;\n+\tunsigned long rx_timeout;\n+\n+\t/* The Rx engine cannot be stopped if it is actively processing\n+\t * packets. Wait for the Rx queue to empty the Rx fifo.  
Don't\n+\t * wait forever though...\n+\t */\n+\trx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *\n+\t\t\t\t\t       rte_get_timer_hz());\n+\n+\twhile (time_before(rte_get_timer_cycles(), rx_timeout)) {\n+\t\trx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);\n+\t\tif ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&\n+\t\t    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))\n+\t\t\tbreak;\n+\n+\t\trte_delay_us(900);\n+\t}\n+\n+\tif (!time_before(rte_get_timer_cycles(), rx_timeout))\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"timed out waiting for Rx queue %u to empty\\n\",\n+\t\t\t    queue);\n+}\n+\n+void axgbe_dev_disable_rx(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_rx_queue *rxq;\n+\tstruct axgbe_port *pdata = dev->data->dev_private;\n+\tunsigned int i;\n+\n+\t/* Disable MAC Rx */\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);\n+\n+\t/* Prepare for Rx DMA channel stop */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\taxgbe_prepare_rx_stop(pdata, i);\n+\t}\n+\t/* Disable each Rx queue */\n+\tAXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\t/* Disable Rx DMA channel */\n+\t\tAXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);\n+\t}\n+}\n+\n+void axgbe_dev_enable_rx(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_rx_queue *rxq;\n+\tstruct axgbe_port *pdata = dev->data->dev_private;\n+\tunsigned int i;\n+\tunsigned int reg_val = 0;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\t/* Enable Rx DMA channel */\n+\t\tAXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);\n+\t}\n+\n+\treg_val = 0;\n+\tfor (i = 0; i < pdata->rx_q_count; i++)\n+\t\treg_val |= (0x02 << (i << 1));\n+\tAXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);\n+\n+\t/* Enable MAC Rx */\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);\n+\t/* Frame is forwarded after stripping CRC to application*/\n+\tif (pdata->crc_strip_enable) {\n+\t\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);\n+\t\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);\n+\t}\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);\n+}\n+\n+/* Rx function one to one refresh */\n+uint16_t\n+axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\tuint16_t nb_rx = 0;\n+\tstruct axgbe_rx_queue *rxq = rx_queue;\n+\tvolatile union axgbe_rx_desc *desc;\n+\tuint64_t old_dirty = rxq->dirty;\n+\tstruct rte_mbuf *mbuf, *tmbuf;\n+\tunsigned int err;\n+\tuint32_t error_status;\n+\tuint16_t idx, pidx, pkt_len;\n+\n+\tidx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);\n+\twhile (nb_rx < nb_pkts) {\n+\t\tif (unlikely(idx == rxq->nb_desc))\n+\t\t\tidx = 0;\n+\n+\t\tdesc = &rxq->desc[idx];\n+\n+\t\tif (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))\n+\t\t\tbreak;\n+\t\ttmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (unlikely(!tmbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"RX mbuf alloc failed port_id = %u\"\n+\t\t\t\t    \" queue_id = %u\\n\",\n+\t\t\t\t    (unsigned int)rxq->port_id,\n+\t\t\t\t    (unsigned int)rxq->queue_id);\n+\t\t\trte_eth_devices[\n+\t\t\t\trxq->port_id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\tpidx = idx + 1;\n+\t\tif (unlikely(pidx == rxq->nb_desc))\n+\t\t\tpidx = 0;\n+\n+\t\trte_prefetch0(rxq->sw_ring[pidx]);\n+\t\tif ((pidx & 0x3) == 0) 
{\n+\t\t\trte_prefetch0(&rxq->desc[pidx]);\n+\t\t\trte_prefetch0(&rxq->sw_ring[pidx]);\n+\t\t}\n+\n+\t\tmbuf = rxq->sw_ring[idx];\n+\t\t/* Check for any errors and free mbuf*/\n+\t\terr = AXGMAC_GET_BITS_LE(desc->write.desc3,\n+\t\t\t\t\t RX_NORMAL_DESC3, ES);\n+\t\terror_status = 0;\n+\t\tif (unlikely(err)) {\n+\t\t\terror_status = desc->write.desc3 & AXGBE_ERR_STATUS;\n+\t\t\tif ((error_status != AXGBE_L3_CSUM_ERR) &&\n+\t\t\t    (error_status != AXGBE_L4_CSUM_ERR)) {\n+\t\t\t\trxq->errors++;\n+\t\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\t\tgoto err_set;\n+\t\t\t}\n+\t\t}\n+\t\tif (rxq->pdata->rx_csum_enable) {\n+\t\t\tmbuf->ol_flags = 0;\n+\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t\t\tif (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {\n+\t\t\t\tmbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;\n+\t\t\t\tmbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\t\t\tmbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;\n+\t\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;\n+\t\t\t} else if (\n+\t\t\t\tunlikely(error_status == AXGBE_L4_CSUM_ERR)) {\n+\t\t\t\tmbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;\n+\t\t\t\tmbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\t}\n+\t\t}\n+\t\trte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));\n+\t\t/* Get the RSS hash */\n+\t\tif (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))\n+\t\t\tmbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);\n+\t\tpkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,\n+\t\t\t\t\t     PL) - rxq->crc_len;\n+\t\t/* Mbuf populate */\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\t\tmbuf->pkt_len = pkt_len;\n+\t\tmbuf->data_len = pkt_len;\n+\t\trxq->bytes += pkt_len;\n+\t\trx_pkts[nb_rx++] = mbuf;\n+err_set:\n+\t\trxq->cur++;\n+\t\trxq->sw_ring[idx++] = tmbuf;\n+\t\tdesc->read.baddr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));\n+\t\tmemset((void *)(&desc->read.desc2), 0, 8);\n+\t\tAXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);\n+\t\trxq->dirty++;\n+\t}\n+\trxq->pkts += nb_rx;\n+\tif (rxq->dirty != old_dirty) {\n+\t\trte_wmb();\n+\t\tidx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);\n+\t\tAXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,\n+\t\t\t\t   low32_value(rxq->ring_phys_addr +\n+\t\t\t\t   (idx * sizeof(union axgbe_rx_desc))));\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n /* Tx Apis */\n static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)\n {\n@@ -174,6 +365,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\ttxq->free_thresh = (txq->nb_desc >> 1);\n \ttxq->free_batch_cnt = txq->free_thresh;\n \n+\t/* In vector_tx path threshold should be multiple of queue_size*/\n+\tif (txq->nb_desc % txq->free_thresh != 0)\n+\t\ttxq->vector_disable = 1;\n+\n \tif ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=\n \t    ETH_TXQ_FLAGS_NOOFFLOADS) {\n \t\ttxq->vector_disable = 1;\n@@ -211,9 +406,243 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \tif (!pdata->tx_queues)\n \t\tpdata->tx_queues = dev->data->tx_queues;\n \n+\tif (txq->vector_disable)\n+\t\tdev->tx_pkt_burst = &axgbe_xmit_pkts;\n+\telse\n+\t\tdev->tx_pkt_burst = &axgbe_xmit_pkts_vec;\n+\n \treturn 0;\n }\n \n+static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,\n+\t\t\t\t      unsigned int queue)\n+{\n+\tunsigned int tx_status;\n+\tunsigned long tx_timeout;\n+\n+\t/* The Tx engine cannot be stopped if it is actively processing\n+\t * packets. 
Wait for the Tx queue to empty the Tx fifo.  Don't\n+\t * wait forever though...\n+\t */\n+\ttx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *\n+\t\t\t\t\t       rte_get_timer_hz());\n+\twhile (time_before(rte_get_timer_cycles(), tx_timeout)) {\n+\t\ttx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);\n+\t\tif ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&\n+\t\t    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))\n+\t\t\tbreak;\n+\n+\t\trte_delay_us(900);\n+\t}\n+\n+\tif (!time_before(rte_get_timer_cycles(), tx_timeout))\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"timed out waiting for Tx queue %u to empty\\n\",\n+\t\t\t    queue);\n+}\n+\n+static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,\n+\t\t\t\t  unsigned int queue)\n+{\n+\tunsigned int tx_dsr, tx_pos, tx_qidx;\n+\tunsigned int tx_status;\n+\tunsigned long tx_timeout;\n+\n+\tif (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)\n+\t\treturn axgbe_txq_prepare_tx_stop(pdata, queue);\n+\n+\t/* Calculate the status register to read and the position within */\n+\tif (queue < DMA_DSRX_FIRST_QUEUE) {\n+\t\ttx_dsr = DMA_DSR0;\n+\t\ttx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;\n+\t} else {\n+\t\ttx_qidx = queue - DMA_DSRX_FIRST_QUEUE;\n+\n+\t\ttx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);\n+\t\ttx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +\n+\t\t\tDMA_DSRX_TPS_START;\n+\t}\n+\n+\t/* The Tx engine cannot be stopped if it is actively processing\n+\t * descriptors. Wait for the Tx engine to enter the stopped or\n+\t * suspended state.  Don't wait forever though...\n+\t */\n+\ttx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *\n+\t\t\t\t\t       rte_get_timer_hz());\n+\twhile (time_before(rte_get_timer_cycles(), tx_timeout)) {\n+\t\ttx_status = AXGMAC_IOREAD(pdata, tx_dsr);\n+\t\ttx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);\n+\t\tif ((tx_status == DMA_TPS_STOPPED) ||\n+\t\t    (tx_status == DMA_TPS_SUSPENDED))\n+\t\t\tbreak;\n+\n+\t\trte_delay_us(900);\n+\t}\n+\n+\tif (!time_before(rte_get_timer_cycles(), tx_timeout))\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"timed out waiting for Tx DMA channel %u to stop\\n\",\n+\t\t\t    queue);\n+}\n+\n+void axgbe_dev_disable_tx(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tstruct axgbe_port *pdata = dev->data->dev_private;\n+\tunsigned int i;\n+\n+\t/* Prepare for stopping DMA channel */\n+\tfor (i = 0; i < pdata->tx_q_count; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\taxgbe_prepare_tx_stop(pdata, i);\n+\t}\n+\t/* Disable MAC Tx */\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);\n+\t/* Disable each Tx queue*/\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,\n+\t\t\t\t\t0);\n+\t/* Disable each  Tx DMA channel */\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tAXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);\n+\t}\n+}\n+\n+void axgbe_dev_enable_tx(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tstruct axgbe_port *pdata = dev->data->dev_private;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\t/* Enable Tx DMA channel */\n+\t\tAXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);\n+\t}\n+\t/* Enable Tx queue*/\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,\n+\t\t\t\t\tMTL_Q_ENABLED);\n+\t/* Enable MAC Tx */\n+\tAXGMAC_IOWRITE_BITS(pdata, 
MAC_TCR, TE, 1);\n+}\n+\n+/* Free Tx conformed mbufs */\n+static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)\n+{\n+\tvolatile struct axgbe_tx_desc *desc;\n+\tuint16_t idx;\n+\n+\tidx = AXGBE_GET_DESC_IDX(txq, txq->dirty);\n+\twhile (txq->cur != txq->dirty) {\n+\t\tif (unlikely(idx == txq->nb_desc))\n+\t\t\tidx = 0;\n+\t\tdesc = &txq->desc[idx];\n+\t\t/* Check for ownership */\n+\t\tif (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))\n+\t\t\treturn;\n+\t\tmemset((void *)&desc->desc2, 0, 8);\n+\t\t/* Free mbuf */\n+\t\trte_pktmbuf_free(txq->sw_ring[idx]);\n+\t\ttxq->sw_ring[idx++] = NULL;\n+\t\ttxq->dirty++;\n+\t}\n+}\n+\n+/* Tx Descriptor formation\n+ * Considering each mbuf requires one desc\n+ * mbuf is linear\n+ */\n+static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,\n+\t\t\t struct rte_mbuf *mbuf)\n+{\n+\tvolatile struct axgbe_tx_desc *desc;\n+\tuint16_t idx;\n+\tuint64_t mask;\n+\n+\tidx = AXGBE_GET_DESC_IDX(txq, txq->cur);\n+\tdesc = &txq->desc[idx];\n+\n+\t/* Update buffer address  and length */\n+\tdesc->baddr = rte_mbuf_data_iova(mbuf);\n+\tAXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,\n+\t\t\t   mbuf->pkt_len);\n+\t/* Total msg length to transmit */\n+\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,\n+\t\t\t   mbuf->pkt_len);\n+\t/* Mark it as First and Last Descriptor */\n+\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);\n+\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);\n+\t/* Mark it as a NORMAL descriptor */\n+\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);\n+\t/* configure h/w Offload */\n+\tmask = mbuf->ol_flags & PKT_TX_L4_MASK;\n+\tif ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))\n+\t\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);\n+\telse if (mbuf->ol_flags & PKT_TX_IP_CKSUM)\n+\t\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);\n+\trte_wmb();\n+\n+\t/* Set OWN bit */\n+\tAXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);\n+\trte_wmb();\n+\n+\t/* Save mbuf */\n+\ttxq->sw_ring[idx] = mbuf;\n+\t/* Update current index*/\n+\ttxq->cur++;\n+\t/* Update stats */\n+\ttxq->bytes += mbuf->pkt_len;\n+\n+\treturn 0;\n+}\n+\n+/* Eal supported tx wrapper*/\n+uint16_t\n+axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (unlikely(nb_pkts == 0))\n+\t\treturn nb_pkts;\n+\n+\tstruct axgbe_tx_queue *txq;\n+\tuint16_t nb_desc_free;\n+\tuint16_t nb_pkt_sent = 0;\n+\tuint16_t idx;\n+\tuint32_t tail_addr;\n+\tstruct rte_mbuf *mbuf;\n+\n+\ttxq  = (struct axgbe_tx_queue *)tx_queue;\n+\tnb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);\n+\n+\tif (unlikely(nb_desc_free <= txq->free_thresh)) {\n+\t\taxgbe_xmit_cleanup(txq);\n+\t\tnb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);\n+\t\tif (unlikely(nb_desc_free == 0))\n+\t\t\treturn 0;\n+\t}\n+\tnb_pkts = RTE_MIN(nb_desc_free, nb_pkts);\n+\twhile (nb_pkts--) {\n+\t\tmbuf = *tx_pkts++;\n+\t\tif (axgbe_xmit_hw(txq, mbuf))\n+\t\t\tgoto out;\n+\t\tnb_pkt_sent++;\n+\t}\n+out:\n+\t/* Sync read and write */\n+\trte_mb();\n+\tidx = AXGBE_GET_DESC_IDX(txq, txq->cur);\n+\ttail_addr = low32_value(txq->ring_phys_addr +\n+\t\t\t\tidx * sizeof(struct axgbe_tx_desc));\n+\t/* Update tail reg with next immediate address to kick Tx DMA channel*/\n+\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);\n+\ttxq->pkts += nb_pkt_sent;\n+\treturn nb_pkt_sent;\n+}\n+\n void axgbe_dev_clear_queues(struct rte_eth_dev *dev)\n {\n \tPMD_INIT_FUNC_TRACE();\ndiff --git 
a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h\nindex 1b88d7a..f221cc3 100644\n--- a/drivers/net/axgbe/axgbe_rxtx.h\n+++ b/drivers/net/axgbe/axgbe_rxtx.h\n@@ -156,12 +156,31 @@ void axgbe_dev_tx_queue_release(void *txq);\n int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\t\t      uint16_t nb_tx_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_txconf *tx_conf);\n+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);\n+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);\n+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+\n+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t uint16_t nb_pkts);\n+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t uint16_t nb_pkts);\n+\n \n void axgbe_dev_rx_queue_release(void *rxq);\n int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\t\t      uint16_t nb_rx_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_rxconf *rx_conf,\n \t\t\t      struct rte_mempool *mb_pool);\n+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);\n+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);\n+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t uint16_t nb_pkts);\n+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,\n+\t\t\t\t\t   struct rte_mbuf **rx_pkts,\n+\t\t\t\t\t   uint16_t nb_pkts);\n void axgbe_dev_clear_queues(struct rte_eth_dev *dev);\n \n #endif /* _AXGBE_RXTX_H_ */\ndiff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c\nnew file mode 100644\nindex 0000000..9be7037\n--- /dev/null\n+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c\n@@ -0,0 +1,93 @@\n+/*   SPDX-License-Identifier: BSD-3-Clause\n+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.\n+ *   Copyright(c) 2018 Synopsys, Inc. 
All rights reserved.\n+ */\n+\n+#include \"axgbe_ethdev.h\"\n+#include \"axgbe_rxtx.h\"\n+#include \"axgbe_phy.h\"\n+\n+#include <rte_time.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+\n+/* Useful to avoid shifting for every descriptor prepration*/\n+#define TX_DESC_CTRL_FLAGS 0xb000000000000000\n+#define TX_FREE_BULK\t   8\n+#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)\n+\n+static inline void\n+axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,\n+\t     struct rte_mbuf *mbuf)\n+{\n+\t__m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |\n+\t\t\t\t\t    TX_DESC_CTRL_FLAGS | mbuf->data_len,\n+\t\t\t\t\t    mbuf->buf_iova\n+\t\t\t\t\t    + mbuf->data_off);\n+\t_mm_store_si128((__m128i *)desc, descriptor);\n+}\n+\n+static void\n+axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)\n+{\n+\tvolatile struct axgbe_tx_desc *desc;\n+\tint idx, i;\n+\n+\tidx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt\n+\t\t\t\t - 1);\n+\tdesc = &txq->desc[idx];\n+\tif (desc->desc3 & AXGBE_DESC_OWN)\n+\t\treturn;\n+\t/* memset avoided for desc ctrl fields since in vec_tx path\n+\t * all 128 bits are populated\n+\t */\n+\tfor (i = 0; i < txq->free_batch_cnt; i++, idx--)\n+\t\trte_pktmbuf_free_seg(txq->sw_ring[idx]);\n+\n+\n+\ttxq->dirty += txq->free_batch_cnt;\n+\ttxq->nb_desc_free += txq->free_batch_cnt;\n+}\n+\n+uint16_t\n+axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t    uint16_t nb_pkts)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tstruct axgbe_tx_queue *txq;\n+\tuint16_t idx, nb_commit, loop, i;\n+\tuint32_t tail_addr;\n+\n+\ttxq  = (struct axgbe_tx_queue *)tx_queue;\n+\tif (txq->nb_desc_free < txq->free_thresh) {\n+\t\taxgbe_xmit_cleanup_vec(txq);\n+\t\tif (unlikely(txq->nb_desc_free == 0))\n+\t\t\treturn 0;\n+\t}\n+\tnb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);\n+\tnb_commit = nb_pkts;\n+\tidx = AXGBE_GET_DESC_IDX(txq, txq->cur);\n+\tloop = txq->nb_desc - idx;\n+\tif (nb_commit >= loop) {\n+\t\tfor (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {\n+\t\t\taxgbe_vec_tx(&txq->desc[idx], *tx_pkts);\n+\t\t\ttxq->sw_ring[idx] = *tx_pkts;\n+\t\t}\n+\t\tnb_commit -= loop;\n+\t\tidx = 0;\n+\t}\n+\tfor (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {\n+\t\taxgbe_vec_tx(&txq->desc[idx], *tx_pkts);\n+\t\ttxq->sw_ring[idx] = *tx_pkts;\n+\t}\n+\ttxq->cur += nb_pkts;\n+\ttail_addr = (uint32_t)(txq->ring_phys_addr +\n+\t\t\t       idx * sizeof(struct axgbe_tx_desc));\n+\t/* Update tail reg with next immediate address to kick Tx DMA channel*/\n+\trte_write32(tail_addr, (void *)txq->dma_tail_reg);\n+\ttxq->pkts += nb_pkts;\n+\ttxq->nb_desc_free -= nb_pkts;\n+\n+\treturn nb_pkts;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "v4",
        "10/17"
    ]
}
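
The "mbox" field in the body above points at the raw patch in mbox form, which is the usual way to take a patch from Patchwork into a local git tree. A small sketch follows, again standard library only; the local filename is an arbitrary choice, and applying the saved file (for example with "git am") is assumed to happen outside this script.

import urllib.request

# Download the raw mbox referenced by the "mbox" field above.
mbox_url = ("https://patches.dpdk.org/project/dpdk/patch/"
            "1522910389-35530-10-git-send-email-Ravi1.kumar@amd.com/mbox/")
with urllib.request.urlopen(mbox_url) as resp:
    mbox_data = resp.read()

# Save it locally; the filename is arbitrary for this example.
with open("v4-10-17-net-axgbe-rxtx.mbox", "wb") as f:
    f.write(mbox_data)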