get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

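The methods listed above map onto the REST endpoint shown next. As a minimal sketch of the read path (assuming the public patches.dpdk.org instance and the third-party Python requests library, neither of which is part of the API itself), a patch and its key fields can be fetched like this:

```python
#!/usr/bin/env python3
"""Minimal sketch: read one patch from the Patchwork REST API."""
import requests

PATCH_ID = 94593  # the patch shown in the example response below
BASE_URL = "https://patches.dpdk.org/api"

# GET needs no authentication on a public Patchwork instance.
resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/", timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"], "->", patch["state"])     # e.g. "[RFC,v3,1/3] sched: ..." -> "superseded"
print("mbox:", patch["mbox"])                  # raw mbox URL, suitable for git am
print("series:", patch["series"][0]["name"])   # parent series metadata
```

The JSON body reproduced below is what such a request returns.
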
GET /api/patches/94593/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94593,
    "url": "https://patches.dpdk.org/api/patches/94593/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210621073506.96017-2-wojciechx.liguzinski@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210621073506.96017-2-wojciechx.liguzinski@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210621073506.96017-2-wojciechx.liguzinski@intel.com",
    "date": "2021-06-21T07:35:04",
    "name": "[RFC,v3,1/3] sched: add PIE based congestion management",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2b5a08497f0392121eab4ff9744ba1bc31872a60",
    "submitter": {
        "id": 2195,
        "url": "https://patches.dpdk.org/api/people/2195/?format=api",
        "name": "Liguzinski, WojciechX",
        "email": "wojciechx.liguzinski@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210621073506.96017-2-wojciechx.liguzinski@intel.com/mbox/",
    "series": [
        {
            "id": 17417,
            "url": "https://patches.dpdk.org/api/series/17417/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17417",
            "date": "2021-06-21T07:35:03",
            "name": "Add PIE support for HQoS library",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/17417/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94593/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/94593/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 03B36A0547;\n\tMon, 21 Jun 2021 09:35:49 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D20494117C;\n\tMon, 21 Jun 2021 09:35:40 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n by mails.dpdk.org (Postfix) with ESMTP id B7D4740040\n for <dev@dpdk.org>; Mon, 21 Jun 2021 09:35:36 +0200 (CEST)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Jun 2021 00:35:25 -0700",
            "from silpixa00400629.ir.intel.com ([10.237.214.62])\n by fmsmga008.fm.intel.com with ESMTP; 21 Jun 2021 00:35:23 -0700"
        ],
        "IronPort-SDR": [
            "\n Vuh48qc6km2dfv33n7qGVDiVUFFaNXUiyf24x9gP1g7q2PyuogvBjrQu5RNd77Sqj1Y4aftrlJ\n kfrg+YOnaggQ==",
            "\n 0DwWCrs1K9sV5z9w4t4KkAVXqxiBtwSjjNlOZJbPphTTTpED5cFjox4hiafKySd3aGVdMEPiYI\n vkmBpXm8bUqA=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10021\"; a=\"206614868\"",
            "E=Sophos;i=\"5.83,289,1616482800\"; d=\"scan'208\";a=\"206614868\"",
            "E=Sophos;i=\"5.83,289,1616482800\"; d=\"scan'208\";a=\"453794325\""
        ],
        "X-ExtLoop1": "1",
        "From": "\"Liguzinski, WojciechX\" <wojciechx.liguzinski@intel.com>",
        "To": "dev@dpdk.org,\n\tjasvinder.singh@intel.com,\n\tcristian.dumitrescu@intel.com",
        "Cc": "savinay.dharmappa@intel.com,\n\tmegha.ajmera@intel.com",
        "Date": "Mon, 21 Jun 2021 08:35:04 +0100",
        "Message-Id": "<20210621073506.96017-2-wojciechx.liguzinski@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210621073506.96017-1-wojciechx.liguzinski@intel.com>",
        "References": "<20210615090200.56824-1-wojciechx.liguzinski@intel.com>\n <20210621073506.96017-1-wojciechx.liguzinski@intel.com>",
        "Subject": "[dpdk-dev] [RFC PATCH v3 1/3] sched: add PIE based congestion\n management",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Implement PIE based congestion management based on rfc8033\n\nSigned-off-by: Liguzinski, WojciechX <wojciechx.liguzinski@intel.com>\n---\n drivers/net/softnic/rte_eth_softnic_tm.c |   6 +-\n lib/sched/meson.build                    |  10 +-\n lib/sched/rte_pie.c                      |  78 +++++\n lib/sched/rte_pie.h                      | 388 +++++++++++++++++++++++\n lib/sched/rte_sched.c                    | 229 +++++++++----\n lib/sched/rte_sched.h                    |  53 +++-\n 6 files changed, 673 insertions(+), 91 deletions(-)\n create mode 100644 lib/sched/rte_pie.c\n create mode 100644 lib/sched/rte_pie.h",
    "diff": "diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c\nindex 90baba15ce..5b6c4e6d4b 100644\n--- a/drivers/net/softnic/rte_eth_softnic_tm.c\n+++ b/drivers/net/softnic/rte_eth_softnic_tm.c\n@@ -420,7 +420,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-#ifdef RTE_SCHED_RED\n+#ifdef RTE_SCHED_AQM\n #define WRED_SUPPORTED\t\t\t\t\t\t1\n #else\n #define WRED_SUPPORTED\t\t\t\t\t\t0\n@@ -2306,7 +2306,7 @@ tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)\n \treturn NULL;\n }\n \n-#ifdef RTE_SCHED_RED\n+#ifdef RTE_SCHED_AQM\n \n static void\n wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)\n@@ -2321,7 +2321,7 @@ wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)\n \tfor (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)\n \t\tfor (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {\n \t\t\tstruct rte_red_params *dst =\n-\t\t\t\t&pp->red_params[tc_id][color];\n+\t\t\t\t&pp->wred_params[tc_id][color];\n \t\t\tstruct tm_wred_profile *src_wp =\n \t\t\t\ttm_tc_wred_profile_get(dev, tc_id);\n \t\t\tstruct rte_tm_red_params *src =\ndiff --git a/lib/sched/meson.build b/lib/sched/meson.build\nindex b24f7b8775..e7ae9bcf19 100644\n--- a/lib/sched/meson.build\n+++ b/lib/sched/meson.build\n@@ -1,11 +1,7 @@\n # SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2017 Intel Corporation\n \n-sources = files('rte_sched.c', 'rte_red.c', 'rte_approx.c')\n-headers = files(\n-        'rte_approx.h',\n-        'rte_red.h',\n-        'rte_sched.h',\n-        'rte_sched_common.h',\n-)\n+sources = files('rte_sched.c', 'rte_red.c', 'rte_approx.c', 'rte_pie.c')\n+headers = files('rte_sched.h', 'rte_sched_common.h',\n+\t\t'rte_red.h', 'rte_approx.h', 'rte_pie.h')\n deps += ['mbuf', 'meter']\ndiff --git a/lib/sched/rte_pie.c b/lib/sched/rte_pie.c\nnew file mode 100644\nindex 0000000000..f538dda21d\n--- /dev/null\n+++ b/lib/sched/rte_pie.c\n@@ -0,0 +1,78 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#include <stdlib.h>\n+\n+#include \"rte_pie.h\"\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+\n+#ifdef __INTEL_COMPILER\n+#pragma warning(disable:2259) /* conversion may lose significant bits */\n+#endif\n+\n+int\n+rte_pie_rt_data_init(struct rte_pie *pie)\n+{\n+\tif (pie == NULL)\n+\t\treturn -1;\n+\n+\tpie->active = 0;\n+\tpie->in_measurement = 0;\n+\tpie->departed_bytes_count = 0;\n+\tpie->start_measurement = 0;\n+\tpie->last_measurement = 0;\n+\tpie->qlen = 0;\n+\tpie->avg_dq_time = 0;\n+\tpie->burst_allowance = 0;\n+\tpie->qdelay_old = 0;\n+\tpie->drop_prob = 0;\n+\tpie->accu_prob = 0;\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_pie_config_init(struct rte_pie_config *pie_cfg,\n+\tconst uint16_t qdelay_ref,\n+\tconst uint16_t dp_update_interval,\n+\tconst uint16_t max_burst,\n+\tconst uint16_t tailq_th)\n+{\n+\tuint64_t tsc_hz = rte_get_tsc_hz();\n+\n+\tif (pie_cfg == NULL)\n+\t\treturn -1;\n+\n+\tif (qdelay_ref <= 0) {\n+\t\tRTE_LOG(ERR, SCHED,\n+\t\t\t\"%s: Incorrect value for qdelay_ref\\n\", __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dp_update_interval <= 0) {\n+\t\tRTE_LOG(ERR, SCHED,\n+\t\t\t\"%s: Incorrect value for dp_update_interval\\n\", __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (max_burst <= 0) {\n+\t\tRTE_LOG(ERR, SCHED,\n+\t\t\t\"%s: Incorrect value for max_burst\\n\", __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (tailq_th <= 0) {\n+\t\tRTE_LOG(ERR, SCHED,\n+\t\t\t\"%s: Incorrect value for 
tailq_th\\n\", __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpie_cfg->qdelay_ref = (tsc_hz * qdelay_ref) / 1000;\n+\tpie_cfg->dp_update_interval = (tsc_hz * dp_update_interval) / 1000;\n+\tpie_cfg->max_burst = (tsc_hz * max_burst) / 1000;\n+\tpie_cfg->tailq_th = tailq_th;\n+\n+\treturn 0;\n+}\ndiff --git a/lib/sched/rte_pie.h b/lib/sched/rte_pie.h\nnew file mode 100644\nindex 0000000000..9295f39c07\n--- /dev/null\n+++ b/lib/sched/rte_pie.h\n@@ -0,0 +1,388 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef __RTE_PIE_H_INCLUDED__\n+#define __RTE_PIE_H_INCLUDED__\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**\n+ * @file\n+ * RTE Proportional Integral controller Enhanced (PIE)\n+ *\n+ *\n+ ***/\n+\n+#include <stdint.h>\n+\n+#include <rte_random.h>\n+#include <rte_debug.h>\n+\n+#define RTE_DQ_THRESHOLD   16384   /**< Queue length threshold (2^14)\n+\t\t\t\t     * to start measurement cycle (bytes)\n+\t\t\t\t     */\n+#define RTE_DQ_WEIGHT      0.25    /**< Weight (RTE_DQ_THRESHOLD/2^16) to compute dequeue rate */\n+#define RTE_ALPHA          0.125   /**< Weights in drop probability calculations */\n+#define RTE_BETA           1.25    /**< Weights in drop probability calculations */\n+#define RTE_RAND_MAX      ~0LLU    /**< Max value of the random number */\n+\n+\n+/**\n+ * PIE configuration parameters passed by user\n+ *\n+ */\n+struct rte_pie_params {\n+\tuint16_t qdelay_ref;           /**< Latency Target (milliseconds) */\n+\tuint16_t dp_update_interval;   /**< Update interval for drop probability (milliseconds) */\n+\tuint16_t max_burst;            /**< Max Burst Allowance (milliseconds) */\n+\tuint16_t tailq_th;             /**< Tailq drop threshold (packet counts) */\n+};\n+\n+/**\n+ * PIE configuration parameters\n+ *\n+ */\n+struct rte_pie_config {\n+\tuint64_t qdelay_ref;           /**< Latency Target (in CPU cycles.) */\n+\tuint64_t dp_update_interval;   /**< Update interval for drop probability (in CPU cycles) */\n+\tuint64_t max_burst;            /**< Max Burst Allowance (in CPU cycles.) 
*/\n+\tuint16_t tailq_th;             /**< Tailq drop threshold (packet counts) */\n+};\n+\n+/**\n+ * RED run-time data\n+ */\n+struct rte_pie {\n+\tuint16_t active;               /**< Flag for activating/deactivating pie */\n+\tuint16_t in_measurement;       /**< Flag for activation of measurement cycle */\n+\tuint32_t departed_bytes_count; /**< Number of bytes departed in current measurement cycle */\n+\tuint64_t start_measurement;    /**< Time to start to measurement cycle (in cpu cycles) */\n+\tuint64_t last_measurement;     /**< Time of last measurement (in cpu cycles) */\n+\tuint64_t qlen;                 /**< Queue length (packets count) */\n+\tuint64_t qlen_bytes;           /**< Queue length (bytes count) */\n+\tuint64_t avg_dq_time;          /**< Time averaged dequeue rate (in cpu cycles) */\n+\tuint32_t burst_allowance;      /**< Current burst allowance (bytes) */\n+\tuint64_t qdelay_old;           /**< Old queue delay (bytes) */\n+\tdouble drop_prob;              /**< Current packet drop probability */\n+\tdouble accu_prob;              /**< Accumulated packet drop probability */\n+};\n+\n+/**\n+ * @brief Initialises run-time data\n+ *\n+ * @param pie [in,out] data pointer to PIE runtime data\n+ *\n+ * @return Operation status\n+ * @retval 0 success\n+ * @retval !0 error\n+ */\n+int\n+rte_pie_rt_data_init(struct rte_pie *pie);\n+\n+/**\n+ * @brief Configures a single PIE configuration parameter structure.\n+ *\n+ * @param pie_cfg [in,out] config pointer to a PIE configuration parameter structure\n+ * @param qdelay_ref [in]  latency target(milliseconds)\n+ * @param dp_update_interval [in] update interval for drop probability (milliseconds)\n+ * @param max_burst [in] maximum burst allowance (milliseconds)\n+ * @param tailq_th [in] tail drop threshold for the queue (number of packets)\n+ *\n+ * @return Operation status\n+ * @retval 0 success\n+ * @retval !0 error\n+ */\n+int\n+rte_pie_config_init(struct rte_pie_config *pie_cfg,\n+\tconst uint16_t qdelay_ref,\n+\tconst uint16_t dp_update_interval,\n+\tconst uint16_t max_burst,\n+\tconst uint16_t tailq_th);\n+\n+/**\n+ * @brief Decides packet enqueue when queue is empty\n+ *\n+ * Note: packet is never dropped in this particular case.\n+ *\n+ * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n+ * @param pie [in, out] data pointer to PIE runtime data\n+ * @param pkt_len [in] packet length in bytes\n+ *\n+ * @return Operation status\n+ * @retval 0 enqueue the packet\n+ * @retval !0 drop the packet\n+ */\n+static inline int\n+rte_pie_enqueue_empty(const struct rte_pie_config *pie_cfg,\n+\tstruct rte_pie *pie,\n+\tuint32_t pkt_len)\n+{\n+\tRTE_ASSERT(pkt_len != NULL);\n+\n+\t/* Update the PIE qlen parameter */\n+\tpie->qlen++;\n+\tpie->qlen_bytes += pkt_len;\n+\n+\t/**\n+\t * If the queue has been idle for a while, turn off PIE and Reset counters\n+\t */\n+\tif ((pie->active == 1) &&\n+\t\t(pie->qlen < (pie_cfg->tailq_th * 0.1))) {\n+\t\tpie->active =  0;\n+\t\tpie->in_measurement = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * @brief make a decision to drop or enqueue a packet based on probability\n+ *        criteria\n+ *\n+ * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n+ * @param pie [in, out] data pointer to PIE runtime data\n+ * @param time [in] current time (measured in cpu cycles)\n+ */\n+static inline void\n+_calc_drop_probability(const struct rte_pie_config *pie_cfg,\n+\tstruct rte_pie *pie, uint64_t time)\n+{\n+\tuint64_t qdelay_ref = pie_cfg->qdelay_ref;\n+\n+\t/* 
Note: can be implemented using integer multiply.\n+\t * DQ_THRESHOLD is power of 2 value.\n+\t */\n+\tdouble current_qdelay = pie->qlen * (pie->avg_dq_time / RTE_DQ_THRESHOLD);\n+\n+\tdouble p = RTE_ALPHA * (current_qdelay - qdelay_ref) +\n+\t\tRTE_BETA * (current_qdelay - pie->qdelay_old);\n+\n+\tif (pie->drop_prob < 0.000001)\n+\t\tp = p * 0.00048828125;              /* (1/2048) = 0.00048828125 */\n+\telse if (pie->drop_prob < 0.00001)\n+\t\tp = p * 0.001953125;                /* (1/512) = 0.001953125  */\n+\telse if (pie->drop_prob < 0.0001)\n+\t\tp = p * 0.0078125;                  /* (1/128) = 0.0078125  */\n+\telse if (pie->drop_prob < 0.001)\n+\t\tp = p * 0.03125;                    /* (1/32) = 0.03125   */\n+\telse if (pie->drop_prob < 0.01)\n+\t\tp = p * 0.125;                      /* (1/8) = 0.125    */\n+\telse if (pie->drop_prob < 0.1)\n+\t\tp = p * 0.5;                        /* (1/2) = 0.5    */\n+\n+\tif (pie->drop_prob >= 0.1 && p > 0.02)\n+\t\tp = 0.02;\n+\n+\tpie->drop_prob += p;\n+\n+\tdouble qdelay = qdelay_ref * 0.5;\n+\n+\t/*  Exponentially decay drop prob when congestion goes away  */\n+\tif (current_qdelay < qdelay && pie->qdelay_old < qdelay)\n+\t\tpie->drop_prob *= 0.98;     /* 1 - 1/64 is sufficient */\n+\n+\t/* Bound drop probability */\n+\tif (pie->drop_prob < 0)\n+\t\tpie->drop_prob = 0;\n+\tif (pie->drop_prob > 1)\n+\t\tpie->drop_prob = 1;\n+\n+\tpie->qdelay_old = current_qdelay;\n+\tpie->last_measurement = time;\n+\n+\tuint64_t burst_allowance = pie->burst_allowance - pie_cfg->dp_update_interval;\n+\n+\tpie->burst_allowance = (burst_allowance > 0) ? burst_allowance : 0;\n+}\n+\n+/**\n+ * @brief make a decision to drop or enqueue a packet based on probability\n+ *        criteria\n+ *\n+ * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n+ * @param pie [in, out] data pointer to PIE runtime data\n+ *\n+ * @return operation status\n+ * @retval 0 enqueue the packet\n+ * @retval 1 drop the packet\n+ */\n+static inline int\n+_rte_pie_drop(const struct rte_pie_config *pie_cfg,\n+\tstruct rte_pie *pie)\n+{\n+\tuint64_t rand_value;\n+\tdouble qdelay = pie_cfg->qdelay_ref * 0.5;\n+\n+\t/* PIE is active but the queue is not congested: return 0 */\n+\tif (((pie->qdelay_old < qdelay) && (pie->drop_prob < 0.2)) ||\n+\t\t(pie->qlen <= (pie_cfg->tailq_th * 0.1)))\n+\t\treturn 0;\n+\n+\tif (pie->drop_prob == 0)\n+\t\tpie->accu_prob = 0;\n+\n+\t/* For practical reasons, drop probability can be further scaled according\n+\t * to packet size, but one needs to set a bound to avoid unnecessary bias\n+\t * Random drop\n+\t */\n+\tpie->accu_prob += pie->drop_prob;\n+\n+\tif (pie->accu_prob < 0.85)\n+\t\treturn 0;\n+\n+\tif (pie->accu_prob >= 8.5)\n+\t\treturn 1;\n+\n+\trand_value = rte_rand()/RTE_RAND_MAX;\n+\n+\tif ((double)rand_value < pie->drop_prob) {\n+\t\tpie->accu_prob = 0;\n+\t\treturn 1;\n+\t}\n+\n+\t/* No drop */\n+\treturn 0;\n+}\n+\n+/**\n+ * @brief Decides if new packet should be enqeued or dropped for non-empty queue\n+ *\n+ * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n+ * @param pie [in,out] data pointer to PIE runtime data\n+ * @param pkt_len [in] packet length in bytes\n+ * @param time [in] current time (measured in cpu cycles)\n+ *\n+ * @return Operation status\n+ * @retval 0 enqueue the packet\n+ * @retval 1 drop the packet based on max threshold criterion\n+ * @retval 2 drop the packet based on mark probability criterion\n+ */\n+static inline int\n+rte_pie_enqueue_nonempty(const struct 
rte_pie_config *pie_cfg,\n+\tstruct rte_pie *pie,\n+\tuint32_t pkt_len,\n+\tconst uint64_t time)\n+{\n+\t/* Check queue space against the tail drop threshold */\n+\tif (pie->qlen >= pie_cfg->tailq_th) {\n+\n+\t\tpie->accu_prob = 0;\n+\t\treturn 1;\n+\t}\n+\n+\tif (pie->active) {\n+\t\t/* Update drop probability after certain interval */\n+\t\tif ((time - pie->last_measurement) >= pie_cfg->dp_update_interval)\n+\t\t\t_calc_drop_probability(pie_cfg, pie, time);\n+\n+\t\t/* Decide whether packet to be dropped or enqueued */\n+\t\tif (_rte_pie_drop(pie_cfg, pie) && pie->burst_allowance == 0)\n+\t\t\treturn 2;\n+\t}\n+\n+\t/* When queue occupancy is over a certain threshold, turn on PIE */\n+\tif ((pie->active == 0) &&\n+\t\t(pie->qlen >= (pie_cfg->tailq_th * 0.1))) {\n+\t\tpie->active = 1;\n+\t\tpie->qdelay_old = 0;\n+\t\tpie->drop_prob = 0;\n+\t\tpie->in_measurement = 1;\n+\t\tpie->departed_bytes_count = 0;\n+\t\tpie->avg_dq_time = 0;\n+\t\tpie->last_measurement = time;\n+\t\tpie->burst_allowance = pie_cfg->max_burst;\n+\t\tpie->accu_prob = 0;\n+\t\tpie->start_measurement = time;\n+\t}\n+\n+\t/* when queue has been idle for a while, turn off PIE and Reset counters */\n+\tif (pie->active == 1 &&\n+\t\tpie->qlen < (pie_cfg->tailq_th * 0.1)) {\n+\t\tpie->active =  0;\n+\t\tpie->in_measurement = 0;\n+\t}\n+\n+\t/* Update PIE qlen parameter */\n+\tpie->qlen++;\n+\tpie->qlen_bytes += pkt_len;\n+\n+\t/* No drop */\n+\treturn 0;\n+}\n+\n+/**\n+ * @brief Decides if new packet should be enqeued or dropped\n+ * Updates run time data and gives verdict whether to enqueue or drop the packet.\n+ *\n+ * @param pie_cfg [in] config pointer to a PIE configuration parameter structure\n+ * @param pie [in,out] data pointer to PIE runtime data\n+ * @param pkt_len [in] packet length in bytes\n+ * @param time [in] current time stamp (measured in cpu cycles)\n+ *\n+ * @return Operation status\n+ * @retval 0 enqueue the packet\n+ * @retval 1 drop the packet based on drop probility criteria\n+ */\n+static inline int\n+rte_pie_enqueue(const struct rte_pie_config *pie_cfg,\n+\tstruct rte_pie *pie,\n+\tconst unsigned int qlen,\n+\tuint32_t pkt_len,\n+\tconst uint64_t time)\n+{\n+\tRTE_ASSERT(pie_cfg != NULL);\n+\tRTE_ASSERT(pie != NULL);\n+\n+\tif (qlen != 0)\n+\t\treturn rte_pie_enqueue_nonempty(pie_cfg, pie, pkt_len, time);\n+\telse\n+\t\treturn rte_pie_enqueue_empty(pie_cfg, pie, pkt_len);\n+}\n+\n+/**\n+ * @brief PIE rate estimation method\n+ * Called on each packet departure.\n+ *\n+ * @param pie [in] data pointer to PIE runtime data\n+ * @param pkt_len [in] packet length in bytes\n+ * @param time [in] current time stamp in cpu cycles\n+ */\n+static inline void\n+rte_pie_dequeue(struct rte_pie *pie,\n+\tuint32_t pkt_len,\n+\tuint64_t time)\n+{\n+\t/* Dequeue rate estimation */\n+\tif (pie->in_measurement) {\n+\t\tpie->departed_bytes_count += pkt_len;\n+\n+\t\t/* Start a new measurement cycle when enough packets */\n+\t\tif (pie->departed_bytes_count >= RTE_DQ_THRESHOLD) {\n+\t\t\tuint64_t dq_time = time - pie->start_measurement;\n+\n+\t\t\tif (pie->avg_dq_time == 0)\n+\t\t\t\tpie->avg_dq_time = dq_time;\n+\t\t\telse\n+\t\t\t\tpie->avg_dq_time = dq_time * RTE_DQ_WEIGHT + pie->avg_dq_time\n+\t\t\t\t\t* (1 - RTE_DQ_WEIGHT);\n+\n+\t\t\tpie->in_measurement = 0;\n+\t\t}\n+\t}\n+\n+\t/* Start measurement cycle when enough data in the queue */\n+\tif ((pie->qlen_bytes >= RTE_DQ_THRESHOLD) && (pie->in_measurement == 0)) {\n+\t\tpie->in_measurement = 1;\n+\t\tpie->start_measurement = time;\n+\t\tpie->departed_bytes_count = 
0;\n+\t}\n+}\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* __RTE_PIE_H_INCLUDED__ */\ndiff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c\nindex cd87e688e4..afda39caf5 100644\n--- a/lib/sched/rte_sched.c\n+++ b/lib/sched/rte_sched.c\n@@ -89,8 +89,12 @@ struct rte_sched_queue {\n \n struct rte_sched_queue_extra {\n \tstruct rte_sched_queue_stats stats;\n-#ifdef RTE_SCHED_RED\n-\tstruct rte_red red;\n+#ifdef RTE_SCHED_AQM\n+\tRTE_STD_C11\n+\tunion {\n+\t\tstruct rte_red red;\n+\t\tstruct rte_pie pie;\n+\t};\n #endif\n };\n \n@@ -183,8 +187,13 @@ struct rte_sched_subport {\n \t/* Pipe queues size */\n \tuint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n \n-#ifdef RTE_SCHED_RED\n-\tstruct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];\n+\tenum rte_sched_aqm_mode aqm;\n+#ifdef RTE_SCHED_AQM\n+\tRTE_STD_C11\n+\tunion {\n+\t\tstruct rte_red_config wred_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];\n+\t\tstruct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n+\t};\n #endif\n \n \t/* Scheduling loop detection */\n@@ -1078,6 +1087,91 @@ rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)\n \trte_free(port);\n }\n \n+#ifdef RTE_SCHED_AQM\n+\n+static int\n+rte_sched_red_config(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *s,\n+\tstruct rte_sched_subport_params *params,\n+\tuint32_t n_subports)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {\n+\n+\t\tuint32_t j;\n+\n+\t\tfor (j = 0; j < RTE_COLORS; j++) {\n+\t\t\t/* if min/max are both zero, then RED is disabled */\n+\t\t\tif ((params->wred_params[i][j].min_th |\n+\t\t\t\t params->wred_params[i][j].max_th) == 0) {\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tif (rte_red_config_init(&s->wred_config[i][j],\n+\t\t\t\tparams->wred_params[i][j].wq_log2,\n+\t\t\t\tparams->wred_params[i][j].min_th,\n+\t\t\t\tparams->wred_params[i][j].max_th,\n+\t\t\t\tparams->wred_params[i][j].maxp_inv) != 0) {\n+\t\t\t\trte_sched_free_memory(port, n_subports);\n+\n+\t\t\t\tRTE_LOG(NOTICE, SCHED,\n+\t\t\t\t\"%s: RED configuration init fails\\n\", __func__);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t}\n+\t}\n+\ts->aqm = RTE_SCHED_AQM_WRED;\n+\treturn 0;\n+}\n+\n+static int\n+rte_sched_pie_config(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *s,\n+\tstruct rte_sched_subport_params *params,\n+\tuint32_t n_subports)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {\n+\t\tif (params->pie_params[i].tailq_th > params->qsize[i]) {\n+\t\t\tRTE_LOG(NOTICE, SCHED,\n+\t\t\t\"%s: PIE tailq threshold incorrect\\n\", __func__);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tif (rte_pie_config_init(&s->pie_config[i],\n+\t\t\tparams->pie_params[i].qdelay_ref,\n+\t\t\tparams->pie_params[i].dp_update_interval,\n+\t\t\tparams->pie_params[i].max_burst,\n+\t\t\tparams->pie_params[i].tailq_th) != 0) {\n+\t\t\trte_sched_free_memory(port, n_subports);\n+\n+\t\t\tRTE_LOG(NOTICE, SCHED,\n+\t\t\t\"%s: PIE configuration init fails\\n\", __func__);\n+\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t}\n+\ts->aqm = RTE_SCHED_AQM_PIE;\n+\treturn 0;\n+}\n+\n+static int\n+rte_sched_aqm_config(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *s,\n+\tstruct rte_sched_subport_params *params,\n+\tuint32_t n_subports)\n+{\n+\tif (params->aqm == RTE_SCHED_AQM_WRED)\n+\t\treturn rte_sched_red_config(port, s, params, n_subports);\n+\n+\telse if (params->aqm == RTE_SCHED_AQM_PIE)\n+\t\treturn rte_sched_pie_config(port, s, params, 
n_subports);\n+\n+\treturn -EINVAL;\n+}\n+#endif\n+\n int\n rte_sched_subport_config(struct rte_sched_port *port,\n \tuint32_t subport_id,\n@@ -1169,30 +1263,11 @@ rte_sched_subport_config(struct rte_sched_port *port,\n \t\ts->n_pipe_profiles = params->n_pipe_profiles;\n \t\ts->n_max_pipe_profiles = params->n_max_pipe_profiles;\n \n-#ifdef RTE_SCHED_RED\n-\t\tfor (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {\n-\t\t\tuint32_t j;\n-\n-\t\t\tfor (j = 0; j < RTE_COLORS; j++) {\n-\t\t\t/* if min/max are both zero, then RED is disabled */\n-\t\t\t\tif ((params->red_params[i][j].min_th |\n-\t\t\t\t     params->red_params[i][j].max_th) == 0) {\n-\t\t\t\t\tcontinue;\n-\t\t\t\t}\n-\n-\t\t\t\tif (rte_red_config_init(&s->red_config[i][j],\n-\t\t\t\t    params->red_params[i][j].wq_log2,\n-\t\t\t\t    params->red_params[i][j].min_th,\n-\t\t\t\t    params->red_params[i][j].max_th,\n-\t\t\t\t    params->red_params[i][j].maxp_inv) != 0) {\n-\t\t\t\t\trte_sched_free_memory(port, n_subports);\n-\n-\t\t\t\t\tRTE_LOG(NOTICE, SCHED,\n-\t\t\t\t\t\"%s: RED configuration init fails\\n\",\n-\t\t\t\t\t__func__);\n-\t\t\t\t\treturn -EINVAL;\n-\t\t\t\t}\n-\t\t\t}\n+#ifdef RTE_SCHED_AQM\n+\t\tstatus = rte_sched_aqm_config(port, s, params, n_subports);\n+\t\tif (status) {\n+\t\t\tRTE_LOG(NOTICE, SCHED, \"%s: AQM configuration fails\\n\", __func__);\n+\t\t\treturn status;\n \t\t}\n #endif\n \n@@ -1714,29 +1789,20 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port,\n \tsubport->stats.n_bytes_tc[tc_index] += pkt_len;\n }\n \n-#ifdef RTE_SCHED_RED\n static inline void\n rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,\n \tstruct rte_sched_subport *subport,\n \tuint32_t qindex,\n \tstruct rte_mbuf *pkt,\n-\tuint32_t red)\n-#else\n-static inline void\n-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,\n-\tstruct rte_sched_subport *subport,\n-\tuint32_t qindex,\n-\tstruct rte_mbuf *pkt,\n-\t__rte_unused uint32_t red)\n-#endif\n+\t__rte_unused uint32_t drops)\n {\n \tuint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);\n \tuint32_t pkt_len = pkt->pkt_len;\n \n \tsubport->stats.n_pkts_tc_dropped[tc_index] += 1;\n \tsubport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;\n-#ifdef RTE_SCHED_RED\n-\tsubport->stats.n_pkts_red_dropped[tc_index] += red;\n+#ifdef RTE_SCHED_AQM\n+\tsubport->stats.n_pkts_aqm_dropped[tc_index] += drops;\n #endif\n }\n \n@@ -1752,58 +1818,61 @@ rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,\n \tqe->stats.n_bytes += pkt_len;\n }\n \n-#ifdef RTE_SCHED_RED\n-static inline void\n-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,\n-\tuint32_t qindex,\n-\tstruct rte_mbuf *pkt,\n-\tuint32_t red)\n-#else\n static inline void\n rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,\n \tuint32_t qindex,\n \tstruct rte_mbuf *pkt,\n-\t__rte_unused uint32_t red)\n-#endif\n+\t__rte_unused uint32_t drops)\n {\n \tstruct rte_sched_queue_extra *qe = subport->queue_extra + qindex;\n \tuint32_t pkt_len = pkt->pkt_len;\n \n \tqe->stats.n_pkts_dropped += 1;\n \tqe->stats.n_bytes_dropped += pkt_len;\n-#ifdef RTE_SCHED_RED\n-\tqe->stats.n_pkts_red_dropped += red;\n+#ifdef RTE_SCHED_AQM\n+\tqe->stats.n_pkts_aqm_dropped += drops;\n #endif\n }\n \n #endif /* RTE_SCHED_COLLECT_STATS */\n \n-#ifdef RTE_SCHED_RED\n+#ifdef RTE_SCHED_AQM\n \n static inline int\n-rte_sched_port_red_drop(struct rte_sched_port *port,\n+rte_sched_port_aqm_drop(struct rte_sched_port *port,\n \tstruct 
rte_sched_subport *subport,\n \tstruct rte_mbuf *pkt,\n \tuint32_t qindex,\n \tuint16_t qlen)\n {\n \tstruct rte_sched_queue_extra *qe;\n-\tstruct rte_red_config *red_cfg;\n-\tstruct rte_red *red;\n \tuint32_t tc_index;\n-\tenum rte_color color;\n \n \ttc_index = rte_sched_port_pipe_tc(port, qindex);\n-\tcolor = rte_sched_port_pkt_read_color(pkt);\n-\tred_cfg = &subport->red_config[tc_index][color];\n+\tqe = subport->queue_extra + qindex;\n \n-\tif ((red_cfg->min_th | red_cfg->max_th) == 0)\n-\t\treturn 0;\n+\t/* WRED */\n+\tif (subport->aqm == RTE_SCHED_AQM_WRED) {\n+\t\tstruct rte_red_config *red_cfg;\n+\t\tstruct rte_red *red;\n+\t\tenum rte_color color;\n \n-\tqe = subport->queue_extra + qindex;\n-\tred = &qe->red;\n+\t\tcolor = rte_sched_port_pkt_read_color(pkt);\n+\t\tred_cfg = &subport->wred_config[tc_index][color];\n+\n+\t\tif ((red_cfg->min_th | red_cfg->max_th) == 0)\n+\t\t\treturn 0;\n \n-\treturn rte_red_enqueue(red_cfg, red, qlen, port->time);\n+\t\tred = &qe->red;\n+\n+\t\treturn rte_red_enqueue(red_cfg, red, qlen, port->time);\n+\t}\n+\n+\t/* PIE */\n+\tstruct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];\n+\tstruct rte_pie *pie = &qe->pie;\n+\n+\treturn rte_pie_enqueue(pie_cfg, pie, pkt->pkt_len, qlen, port->time_cpu_cycles);\n }\n \n static inline void\n@@ -1811,14 +1880,29 @@ rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,\n \tstruct rte_sched_subport *subport, uint32_t qindex)\n {\n \tstruct rte_sched_queue_extra *qe = subport->queue_extra + qindex;\n-\tstruct rte_red *red = &qe->red;\n+\tif (subport->aqm == RTE_SCHED_AQM_WRED) {\n+\t\tstruct rte_red *red = &qe->red;\n+\n+\t\trte_red_mark_queue_empty(red, port->time);\n+\t}\n+}\n+\n+static inline void\n+rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,\n+uint32_t qindex, uint32_t pkt_len, uint64_t time) {\n+\tstruct rte_sched_queue_extra *qe = subport->queue_extra + qindex;\n+\tstruct rte_pie *pie = &qe->pie;\n \n-\trte_red_mark_queue_empty(red, port->time);\n+\t/* Update queue length */\n+\tpie->qlen -= 1;\n+\tpie->qlen_bytes -= pkt_len;\n+\n+\trte_pie_dequeue(pie, pkt_len, time);\n }\n \n #else\n \n-static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,\n+static inline int rte_sched_port_aqm_drop(struct rte_sched_port *port __rte_unused,\n \tstruct rte_sched_subport *subport __rte_unused,\n \tstruct rte_mbuf *pkt __rte_unused,\n \tuint32_t qindex __rte_unused,\n@@ -1829,7 +1913,7 @@ static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unus\n \n #define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex)\n \n-#endif /* RTE_SCHED_RED */\n+#endif /* RTE_SCHED_AQM */\n \n #ifdef RTE_SCHED_DEBUG\n \n@@ -1925,7 +2009,7 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,\n \tqlen = q->qw - q->qr;\n \n \t/* Drop the packet (and update drop stats) when queue is full */\n-\tif (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||\n+\tif (unlikely(rte_sched_port_aqm_drop(port, subport, pkt, qindex, qlen) ||\n \t\t     (qlen >= qsize))) {\n \t\trte_pktmbuf_free(pkt);\n #ifdef RTE_SCHED_COLLECT_STATS\n@@ -2398,6 +2482,7 @@ grinder_schedule(struct rte_sched_port *port,\n {\n \tstruct rte_sched_grinder *grinder = subport->grinder + pos;\n \tstruct rte_sched_queue *queue = grinder->queue[grinder->qpos];\n+\tuint32_t qindex = grinder->qindex[grinder->qpos];\n \tstruct rte_mbuf *pkt = grinder->pkt;\n \tuint32_t pkt_len = pkt->pkt_len + port->frame_overhead;\n \tuint32_t be_tc_active;\n@@ -2417,15 +2502,19 
@@ grinder_schedule(struct rte_sched_port *port,\n \t\t(pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;\n \n \tif (queue->qr == queue->qw) {\n-\t\tuint32_t qindex = grinder->qindex[grinder->qpos];\n-\n \t\trte_bitmap_clear(subport->bmp, qindex);\n \t\tgrinder->qmask &= ~(1 << grinder->qpos);\n \t\tif (be_tc_active)\n \t\t\tgrinder->wrr_mask[grinder->qpos] = 0;\n+\n \t\trte_sched_port_set_queue_empty_timestamp(port, subport, qindex);\n \t}\n \n+#ifdef RTE_SCHED_AQM\n+\tif (subport->aqm == RTE_SCHED_AQM_PIE)\n+\t\trte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);\n+#endif\n+\n \t/* Reset pipe loop detection */\n \tsubport->pipe_loop = RTE_SCHED_PIPE_INVALID;\n \tgrinder->productive = 1;\ndiff --git a/lib/sched/rte_sched.h b/lib/sched/rte_sched.h\nindex c1a772b70c..a5fe6266cd 100644\n--- a/lib/sched/rte_sched.h\n+++ b/lib/sched/rte_sched.h\n@@ -61,9 +61,10 @@ extern \"C\" {\n #include <rte_mbuf.h>\n #include <rte_meter.h>\n \n-/** Random Early Detection (RED) */\n-#ifdef RTE_SCHED_RED\n+/** Active Queue Management */\n+#ifdef RTE_SCHED_AQM\n #include \"rte_red.h\"\n+#include \"rte_pie.h\"\n #endif\n \n /** Maximum number of queues per pipe.\n@@ -110,6 +111,28 @@ extern \"C\" {\n #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24\n #endif\n \n+/**\n+ * Active Queue Management (AQM) mode\n+ *\n+ * This is used for controlling the admission of packets into a packet queue or\n+ * group of packet queues on congestion.\n+ *\n+ * The *Random Early Detection (RED)* algorithm works by proactively dropping\n+ * more and more input packets as the queue occupancy builds up. When the queue\n+ * is full or almost full, RED effectively works as *tail drop*. The *Weighted\n+ * RED* algorithm uses a separate set of RED thresholds for each packet color.\n+ *\n+ * Similar to RED, Proportional Integral Controller Enhanced (PIE) randomly\n+ * drops a packet at the onset of the congestion and tries to control the\n+ * latency around the target value. The congestion detection, however, is based\n+ * on the queueing latency instead of the queue length like RED. For more\n+ * information, refer RFC8033.\n+ */\n+enum rte_sched_aqm_mode {\n+\tRTE_SCHED_AQM_WRED, /**< Weighted Random Early Detection (WRED) */\n+\tRTE_SCHED_AQM_PIE,  /**< Proportional Integral Controller Enhanced (PIE) */\n+};\n+\n /*\n  * Pipe configuration parameters. 
The period and credits_per_period\n  * parameters are measured in bytes, with one byte meaning the time\n@@ -174,9 +197,17 @@ struct rte_sched_subport_params {\n \t/** Max allowed profiles in the pipe profile table */\n \tuint32_t n_max_pipe_profiles;\n \n-#ifdef RTE_SCHED_RED\n-\t/** RED parameters */\n-\tstruct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];\n+#ifdef RTE_SCHED_AQM\n+\t/** Active Queue Management mode */\n+\tenum rte_sched_aqm_mode aqm;\n+\n+\tRTE_STD_C11\n+\tunion {\n+\t\t/** WRED parameters */\n+\t\tstruct rte_red_params wred_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];\n+\t\t/** PIE parameters */\n+\t\tstruct rte_pie_params pie_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n+\t};\n #endif\n };\n \n@@ -208,9 +239,9 @@ struct rte_sched_subport_stats {\n \t/** Number of bytes dropped for each traffic class */\n \tuint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n \n-#ifdef RTE_SCHED_RED\n-\t/** Number of packets dropped by red */\n-\tuint64_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n+#ifdef RTE_SCHED_AQM\n+\t/** Number of packets dropped by active queue management scheme */\n+\tuint64_t n_pkts_aqm_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];\n #endif\n };\n \n@@ -222,9 +253,9 @@ struct rte_sched_queue_stats {\n \t/** Packets dropped */\n \tuint64_t n_pkts_dropped;\n \n-#ifdef RTE_SCHED_RED\n-\t/** Packets dropped by RED */\n-\tuint64_t n_pkts_red_dropped;\n+#ifdef RTE_SCHED_AQM\n+\t/** Packets dropped by active queue management scheme */\n+\tuint64_t n_pkts_aqm_dropped;\n #endif\n \n \t/** Bytes successfully written */\n",
    "prefixes": [
        "RFC",
        "v3",
        "1/3"
    ]
}
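
The "Allow: GET, PUT, PATCH, HEAD, OPTIONS" header in the response above means the same URL also accepts updates, matching the "Update a patch" descriptions at the top. A hedged sketch of a partial update with PATCH follows; the "Authorization: Token ..." header format, the PATCHWORK_TOKEN environment variable, and the choice of writable fields (state, archived) are assumptions about a typical Patchwork deployment with maintainer rights, not something shown in this response:

```python
#!/usr/bin/env python3
"""Hedged sketch: partially update a patch via the PATCH method."""
import os
import requests

PATCH_ID = 94593
URL = f"https://patches.dpdk.org/api/patches/{PATCH_ID}/"
TOKEN = os.environ["PATCHWORK_TOKEN"]  # assumed env var holding an API token

# PATCH is a partial update: send only the fields being changed.
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": False},
    timeout=30,
)
resp.raise_for_status()
print("new state:", resp.json()["state"])
```

With PUT the request body would carry the full set of writable fields rather than only the changed ones.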