get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (full replace of writable fields).
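
Both operations can be scripted against this endpoint. The following is a minimal sketch, assuming Python 3 with the third-party requests package installed; TOKEN is a hypothetical placeholder (Patchwork issues API tokens per user account), and updates are normally accepted only from authenticated project maintainers.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "your-api-token"  # hypothetical placeholder, not a real token

# get: fetch the patch detail shown below as JSON.
resp = requests.get(BASE + "/patches/57871/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# patch: partial update of writable fields such as "state".
# Authentication is assumed here to be a DRF-style token header;
# "superseded" is one of the state values seen in the body below.
resp = requests.patch(
    BASE + "/patches/57871/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded"},
)
resp.raise_for_status()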

GET /api/patches/57871/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 57871,
    "url": "http://patches.dpdk.org/api/patches/57871/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190823144602.58213-7-jasvinder.singh@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190823144602.58213-7-jasvinder.singh@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190823144602.58213-7-jasvinder.singh@intel.com",
    "date": "2019-08-23T14:45:53",
    "name": "[06/15] sched: modify pkt enqueue for subport config flexibility",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "017ab793cd508a6b3a1dad304fadc417a25002fa",
    "submitter": {
        "id": 285,
        "url": "http://patches.dpdk.org/api/people/285/?format=api",
        "name": "Jasvinder Singh",
        "email": "jasvinder.singh@intel.com"
    },
    "delegate": {
        "id": 10018,
        "url": "http://patches.dpdk.org/api/users/10018/?format=api",
        "username": "cristian_dumitrescu",
        "first_name": "Cristian",
        "last_name": "Dumitrescu",
        "email": "cristian.dumitrescu@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190823144602.58213-7-jasvinder.singh@intel.com/mbox/",
    "series": [
        {
            "id": 6115,
            "url": "http://patches.dpdk.org/api/series/6115/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6115",
            "date": "2019-08-23T14:45:47",
            "name": "sched: subport level configuration of pipe nodes",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/6115/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/57871/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/57871/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7EE531C042;\n\tFri, 23 Aug 2019 16:46:24 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id CE4291BF08\n\tfor <dev@dpdk.org>; Fri, 23 Aug 2019 16:46:11 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Aug 2019 07:46:11 -0700",
            "from silpixa00381635.ir.intel.com (HELO\n\tsilpixa00381635.ger.corp.intel.com) ([10.237.223.4])\n\tby orsmga001.jf.intel.com with ESMTP; 23 Aug 2019 07:46:09 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.64,421,1559545200\"; d=\"scan'208\";a=\"263211270\"",
        "From": "Jasvinder Singh <jasvinder.singh@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "cristian.dumitrescu@intel.com,\n\tLukasz Krakowiak <lukaszx.krakowiak@intel.com>",
        "Date": "Fri, 23 Aug 2019 15:45:53 +0100",
        "Message-Id": "<20190823144602.58213-7-jasvinder.singh@intel.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20190823144602.58213-1-jasvinder.singh@intel.com>",
        "References": "<20190823144602.58213-1-jasvinder.singh@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH 06/15] sched: modify pkt enqueue for subport\n\tconfig flexibility",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Modify scheduler packet enqueue operation of the scheduler to allow\ndifferent subports of the same port to have different configuration\nin terms of number of pipes, pipe queue sizes, etc.\n\nSigned-off-by: Jasvinder Singh <jasvinder.singh@intel.com>\nSigned-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>\n---\n lib/librte_sched/rte_sched.c | 278 ++++++++++++++++++++++-------------\n 1 file changed, 178 insertions(+), 100 deletions(-)",
    "diff": "diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c\nindex 6bafbd8fe..a5e4c45b4 100644\n--- a/lib/librte_sched/rte_sched.c\n+++ b/lib/librte_sched/rte_sched.c\n@@ -1452,10 +1452,10 @@ rte_sched_port_qindex(struct rte_sched_port *port,\n \tuint32_t queue)\n {\n \treturn ((subport & (port->n_subports_per_port - 1)) <<\n-\t\t\t(port->n_pipes_per_subport_log2 + 4)) |\n-\t\t\t((pipe & (port->n_pipes_per_subport - 1)) << 4) |\n-\t\t\t((rte_sched_port_pipe_queue(port, traffic_class) + queue) &\n-\t\t\t(RTE_SCHED_QUEUES_PER_PIPE - 1));\n+\t\t(port->n_max_pipes_per_subport_log2 + 4)) |\n+\t\t((pipe & (port->subports[subport]->n_pipes_per_subport - 1)) << 4) |\n+\t\t((rte_sched_port_pipe_queue(port, traffic_class) + queue) &\n+\t\t(RTE_SCHED_QUEUES_PER_PIPE - 1));\n }\n \n void\n@@ -1479,8 +1479,9 @@ rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,\n {\n \tuint32_t queue_id = rte_mbuf_sched_queue_get(pkt);\n \n-\t*subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);\n-\t*pipe = (queue_id >> 4) & (port->n_pipes_per_subport - 1);\n+\t*subport = queue_id >> (port->n_max_pipes_per_subport_log2 + 4);\n+\t*pipe = (queue_id >> 4) &\n+\t\t(port->subports[*subport]->n_pipes_per_subport - 1);\n \t*traffic_class = rte_sched_port_pipe_tc(port, queue_id);\n \t*queue = rte_sched_port_tc_queue(port, queue_id);\n }\n@@ -1524,7 +1525,7 @@ rte_sched_subport_read_stats(struct rte_sched_port *port,\n \t\treturn -EINVAL;\n \t}\n \n-\ts = port->subport + subport_id;\n+\ts = port->subports[subport_id];\n \n \t/* Copy subport stats and clear */\n \tmemcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));\n@@ -1597,43 +1598,50 @@ rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)\n #ifdef RTE_SCHED_COLLECT_STATS\n \n static inline void\n-rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)\n+rte_sched_port_update_subport_stats(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt)\n {\n-\tstruct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));\n \tuint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);\n \tuint32_t pkt_len = pkt->pkt_len;\n \n-\ts->stats.n_pkts_tc[tc_index] += 1;\n-\ts->stats.n_bytes_tc[tc_index] += pkt_len;\n+\tsubport->stats.n_pkts_tc[tc_index] += 1;\n+\tsubport->stats.n_bytes_tc[tc_index] += pkt_len;\n }\n \n #ifdef RTE_SCHED_RED\n static inline void\n rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,\n-\t\t\t\t\t\tuint32_t qindex,\n-\t\t\t\t\t\tstruct rte_mbuf *pkt, uint32_t red)\n+\tstruct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt,\n+\tuint32_t red)\n #else\n static inline void\n rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,\n-\t\t\t\t\t\tuint32_t qindex,\n-\t\t\t\t\t\tstruct rte_mbuf *pkt, __rte_unused uint32_t red)\n+\tstruct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt,\n+\t__rte_unused uint32_t red)\n #endif\n {\n-\tstruct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));\n \tuint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);\n \tuint32_t pkt_len = pkt->pkt_len;\n \n-\ts->stats.n_pkts_tc_dropped[tc_index] += 1;\n-\ts->stats.n_bytes_tc_dropped[tc_index] += pkt_len;\n+\tsubport->stats.n_pkts_tc_dropped[tc_index] += 1;\n+\tsubport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;\n #ifdef 
RTE_SCHED_RED\n-\ts->stats.n_pkts_red_dropped[tc_index] += red;\n+\tsubport->stats.n_pkts_red_dropped[tc_index] += red;\n #endif\n }\n \n static inline void\n-rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)\n+rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt)\n {\n-\tstruct rte_sched_queue_extra *qe = port->queue_extra + qindex;\n+\tstruct rte_sched_queue_extra *qe = subport->queue_extra + qindex;\n \tuint32_t pkt_len = pkt->pkt_len;\n \n \tqe->stats.n_pkts += 1;\n@@ -1642,17 +1650,19 @@ rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,\n \n #ifdef RTE_SCHED_RED\n static inline void\n-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,\n-\t\t\t\t\t\tuint32_t qindex,\n-\t\t\t\t\t\tstruct rte_mbuf *pkt, uint32_t red)\n+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt,\n+\tuint32_t red)\n #else\n static inline void\n-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,\n-\t\t\t\t\t\tuint32_t qindex,\n-\t\t\t\t\t\tstruct rte_mbuf *pkt, __rte_unused uint32_t red)\n+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf *pkt,\n+\t__rte_unused uint32_t red)\n #endif\n {\n-\tstruct rte_sched_queue_extra *qe = port->queue_extra + qindex;\n+\tstruct rte_sched_queue_extra *qe = subport->queue_extra + qindex;\n \tuint32_t pkt_len = pkt->pkt_len;\n \n \tqe->stats.n_pkts_dropped += 1;\n@@ -1667,7 +1677,11 @@ rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,\n #ifdef RTE_SCHED_RED\n \n static inline int\n-rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)\n+rte_sched_port_red_drop(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *subport,\n+\tstruct rte_mbuf *pkt,\n+\tuint32_t qindex,\n+\tuint16_t qlen)\n {\n \tstruct rte_sched_queue_extra *qe;\n \tstruct rte_red_config *red_cfg;\n@@ -1677,12 +1691,12 @@ rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint3\n \n \ttc_index = rte_sched_port_pipe_tc(port, qindex);\n \tcolor = rte_sched_port_pkt_read_color(pkt);\n-\tred_cfg = &port->red_config[tc_index][color];\n+\tred_cfg = &subport->red_config[tc_index][color];\n \n \tif ((red_cfg->min_th | red_cfg->max_th) == 0)\n \t\treturn 0;\n \n-\tqe = port->queue_extra + qindex;\n+\tqe = subport->queue_extra + qindex;\n \tred = &qe->red;\n \n \treturn rte_red_enqueue(red_cfg, red, qlen, port->time);\n@@ -1699,7 +1713,14 @@ rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t q\n \n #else\n \n-#define rte_sched_port_red_drop(port, pkt, qindex, qlen)             0\n+static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,\n+\tstruct rte_sched_subport *subport __rte_unused,\n+\tstruct rte_mbuf *pkt __rte_unused,\n+\tuint32_t qindex __rte_unused,\n+\tuint16_t qlen __rte_unused)\n+{\n+\treturn 0;\n+}\n \n #define rte_sched_port_set_queue_empty_timestamp(port, qindex)\n \n@@ -1734,63 +1755,79 @@ debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos,\n \n #endif /* RTE_SCHED_DEBUG */\n \n+static inline struct rte_sched_subport *\n+rte_sched_port_subport(struct rte_sched_port *port,\n+\tstruct rte_mbuf *pkt)\n+{\n+\tuint32_t queue_id = rte_mbuf_sched_queue_get(pkt);\n+\tuint32_t subport_id = queue_id >> 
(port->n_max_pipes_per_subport_log2 + 4);\n+\n+\treturn port->subports[subport_id];\n+}\n+\n static inline uint32_t\n-rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,\n-\t\t\t\t       struct rte_mbuf *pkt)\n+rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,\n+\tstruct rte_mbuf *pkt, uint32_t subport_qmask)\n {\n \tstruct rte_sched_queue *q;\n #ifdef RTE_SCHED_COLLECT_STATS\n \tstruct rte_sched_queue_extra *qe;\n #endif\n \tuint32_t qindex = rte_mbuf_sched_queue_get(pkt);\n+\tuint32_t subport_queue_id = subport_qmask & qindex;\n \n-\tq = port->queue + qindex;\n+\tq = subport->queue + subport_queue_id;\n \trte_prefetch0(q);\n #ifdef RTE_SCHED_COLLECT_STATS\n-\tqe = port->queue_extra + qindex;\n+\tqe = subport->queue_extra + subport_queue_id;\n \trte_prefetch0(qe);\n #endif\n \n-\treturn qindex;\n+\treturn subport_queue_id;\n }\n \n static inline void\n rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,\n-\t\t\t\t     uint32_t qindex, struct rte_mbuf **qbase)\n+\tstruct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf **qbase)\n {\n \tstruct rte_sched_queue *q;\n \tstruct rte_mbuf **q_qw;\n \tuint16_t qsize;\n \n-\tq = port->queue + qindex;\n-\tqsize = rte_sched_port_qsize(port, qindex);\n+\tq = subport->queue + qindex;\n+\tqsize = rte_sched_subport_pipe_qsize(port, subport, qindex);\n \tq_qw = qbase + (q->qw & (qsize - 1));\n \n \trte_prefetch0(q_qw);\n-\trte_bitmap_prefetch0(port->bmp, qindex);\n+\trte_bitmap_prefetch0(subport->bmp, qindex);\n }\n \n static inline int\n-rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex,\n-\t\t\t   struct rte_mbuf **qbase, struct rte_mbuf *pkt)\n+rte_sched_port_enqueue_qwa(struct rte_sched_port *port,\n+\tstruct rte_sched_subport *subport,\n+\tuint32_t qindex,\n+\tstruct rte_mbuf **qbase,\n+\tstruct rte_mbuf *pkt)\n {\n \tstruct rte_sched_queue *q;\n \tuint16_t qsize;\n \tuint16_t qlen;\n \n-\tq = port->queue + qindex;\n-\tqsize = rte_sched_port_qsize(port, qindex);\n+\tq = subport->queue + qindex;\n+\tqsize = rte_sched_subport_pipe_qsize(port, subport, qindex);\n \tqlen = q->qw - q->qr;\n \n \t/* Drop the packet (and update drop stats) when queue is full */\n-\tif (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) ||\n+\tif (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||\n \t\t     (qlen >= qsize))) {\n \t\trte_pktmbuf_free(pkt);\n #ifdef RTE_SCHED_COLLECT_STATS\n-\t\trte_sched_port_update_subport_stats_on_drop(port, qindex, pkt,\n-\t\t\t\t\t\t\t    qlen < qsize);\n-\t\trte_sched_port_update_queue_stats_on_drop(port, qindex, pkt,\n-\t\t\t\t\t\t\t  qlen < qsize);\n+\t\trte_sched_port_update_subport_stats_on_drop(port, subport,\n+\t\t\tqindex, pkt, qlen < qsize);\n+\t\trte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,\n+\t\t\tqlen < qsize);\n #endif\n \t\treturn 0;\n \t}\n@@ -1799,13 +1836,13 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex,\n \tqbase[q->qw & (qsize - 1)] = pkt;\n \tq->qw++;\n \n-\t/* Activate queue in the port bitmap */\n-\trte_bitmap_set(port->bmp, qindex);\n+\t/* Activate queue in the subport bitmap */\n+\trte_bitmap_set(subport->bmp, qindex);\n \n \t/* Statistics */\n #ifdef RTE_SCHED_COLLECT_STATS\n-\trte_sched_port_update_subport_stats(port, qindex, pkt);\n-\trte_sched_port_update_queue_stats(port, qindex, pkt);\n+\trte_sched_port_update_subport_stats(port, subport, qindex, pkt);\n+\trte_sched_port_update_queue_stats(subport, qindex, pkt);\n #endif\n \n \treturn 1;\n@@ 
-1833,17 +1870,22 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \t\t*pkt30, *pkt31, *pkt_last;\n \tstruct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,\n \t\t**q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;\n+\tstruct rte_sched_subport *subport00, *subport01, *subport10, *subport11,\n+\t\t*subport20, *subport21, *subport30, *subport31, *subport_last;\n \tuint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;\n \tuint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;\n+\tuint32_t subport_qmask;\n \tuint32_t result, i;\n \n \tresult = 0;\n+\tsubport_qmask = (1 << (port->n_max_pipes_per_subport_log2 + 4)) - 1;\n \n \t/*\n \t * Less then 6 input packets available, which is not enough to\n \t * feed the pipeline\n \t */\n \tif (unlikely(n_pkts < 6)) {\n+\t\tstruct rte_sched_subport *subports[5];\n \t\tstruct rte_mbuf **q_base[5];\n \t\tuint32_t q[5];\n \n@@ -1851,22 +1893,26 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \t\tfor (i = 0; i < n_pkts; i++)\n \t\t\trte_prefetch0(pkts[i]);\n \n+\t\t/* Prefetch the subport structure for each packet */\n+\t\tfor (i = 0; i < n_pkts; i++)\n+\t\t\tsubports[i] = rte_sched_port_subport(port, pkts[i]);\n+\n \t\t/* Prefetch the queue structure for each queue */\n \t\tfor (i = 0; i < n_pkts; i++)\n-\t\t\tq[i] = rte_sched_port_enqueue_qptrs_prefetch0(port,\n-\t\t\t\t\t\t\t\t      pkts[i]);\n+\t\t\tq[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],\n+\t\t\t\t\tpkts[i], subport_qmask);\n \n \t\t/* Prefetch the write pointer location of each queue */\n \t\tfor (i = 0; i < n_pkts; i++) {\n-\t\t\tq_base[i] = rte_sched_port_qbase(port, q[i]);\n-\t\t\trte_sched_port_enqueue_qwa_prefetch0(port, q[i],\n-\t\t\t\t\t\t\t     q_base[i]);\n+\t\t\tq_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);\n+\t\t\trte_sched_port_enqueue_qwa_prefetch0(port, subports[i],\n+\t\t\t\tq[i], q_base[i]);\n \t\t}\n \n \t\t/* Write each packet to its queue */\n \t\tfor (i = 0; i < n_pkts; i++)\n-\t\t\tresult += rte_sched_port_enqueue_qwa(port, q[i],\n-\t\t\t\t\t\t\t     q_base[i], pkts[i]);\n+\t\t\tresult += rte_sched_port_enqueue_qwa(port, subports[i],\n+\t\t\t\t\t\tq[i], q_base[i], pkts[i]);\n \n \t\treturn result;\n \t}\n@@ -1882,21 +1928,29 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \trte_prefetch0(pkt10);\n \trte_prefetch0(pkt11);\n \n-\tq20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);\n-\tq21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);\n+\tsubport20 = rte_sched_port_subport(port, pkt20);\n+\tsubport21 = rte_sched_port_subport(port, pkt21);\n+\tq20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,\n+\t\t\tpkt20, subport_qmask);\n+\tq21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,\n+\t\t\tpkt21, subport_qmask);\n \n \tpkt00 = pkts[4];\n \tpkt01 = pkts[5];\n \trte_prefetch0(pkt00);\n \trte_prefetch0(pkt01);\n \n-\tq10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);\n-\tq11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);\n+\tsubport10 = rte_sched_port_subport(port, pkt10);\n+\tsubport11 = rte_sched_port_subport(port, pkt11);\n+\tq10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,\n+\t\t\tpkt10, subport_qmask);\n+\tq11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,\n+\t\t\tpkt11, subport_qmask);\n \n-\tq20_base = rte_sched_port_qbase(port, q20);\n-\tq21_base = rte_sched_port_qbase(port, q21);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q20, 
q20_base);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);\n+\tq20_base = rte_sched_subport_pipe_qbase(subport20, q20);\n+\tq21_base = rte_sched_subport_pipe_qbase(subport21, q21);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);\n \n \t/* Run the pipeline */\n \tfor (i = 6; i < (n_pkts & (~1)); i += 2) {\n@@ -1911,6 +1965,10 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \t\tq31 = q21;\n \t\tq20 = q10;\n \t\tq21 = q11;\n+\t\tsubport30 = subport20;\n+\t\tsubport31 = subport21;\n+\t\tsubport20 = subport10;\n+\t\tsubport21 = subport11;\n \t\tq30_base = q20_base;\n \t\tq31_base = q21_base;\n \n@@ -1920,19 +1978,25 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \t\trte_prefetch0(pkt00);\n \t\trte_prefetch0(pkt01);\n \n-\t\t/* Stage 1: Prefetch queue structure storing queue pointers */\n-\t\tq10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);\n-\t\tq11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);\n+\t\t/* Stage 1: Prefetch subport and queue structure storing queue pointers */\n+\t\tsubport10 = rte_sched_port_subport(port, pkt10);\n+\t\tsubport11 = rte_sched_port_subport(port, pkt11);\n+\t\tq10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,\n+\t\t\t\tpkt10, subport_qmask);\n+\t\tq11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,\n+\t\t\t\tpkt11, subport_qmask);\n \n \t\t/* Stage 2: Prefetch queue write location */\n-\t\tq20_base = rte_sched_port_qbase(port, q20);\n-\t\tq21_base = rte_sched_port_qbase(port, q21);\n-\t\trte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);\n-\t\trte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);\n+\t\tq20_base = rte_sched_subport_pipe_qbase(subport20, q20);\n+\t\tq21_base = rte_sched_subport_pipe_qbase(subport21, q21);\n+\t\trte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);\n+\t\trte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);\n \n \t\t/* Stage 3: Write packet to queue and activate queue */\n-\t\tr30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);\n-\t\tr31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);\n+\t\tr30 = rte_sched_port_enqueue_qwa(port, subport30,\n+\t\t\t\tq30, q30_base, pkt30);\n+\t\tr31 = rte_sched_port_enqueue_qwa(port, subport31,\n+\t\t\t\tq31, q31_base, pkt31);\n \t\tresult += r30 + r31;\n \t}\n \n@@ -1944,38 +2008,52 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,\n \tpkt_last = pkts[n_pkts - 1];\n \trte_prefetch0(pkt_last);\n \n-\tq00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);\n-\tq01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);\n-\n-\tq10_base = rte_sched_port_qbase(port, q10);\n-\tq11_base = rte_sched_port_qbase(port, q11);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);\n-\n-\tr20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);\n-\tr21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);\n+\tsubport00 = rte_sched_port_subport(port, pkt00);\n+\tsubport01 = rte_sched_port_subport(port, pkt01);\n+\tq00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,\n+\t\t\tpkt00, subport_qmask);\n+\tq01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,\n+\t\t\tpkt01, subport_qmask);\n+\n+\tq10_base = rte_sched_subport_pipe_qbase(subport10, q10);\n+\tq11_base = rte_sched_subport_pipe_qbase(subport11, 
q11);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);\n+\n+\tr20 = rte_sched_port_enqueue_qwa(port, subport20,\n+\t\t\tq20, q20_base, pkt20);\n+\tr21 = rte_sched_port_enqueue_qwa(port, subport21,\n+\t\t\tq21, q21_base, pkt21);\n \tresult += r20 + r21;\n \n-\tq_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);\n+\tsubport_last = rte_sched_port_subport(port, pkt_last);\n+\tq_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,\n+\t\t\t\tpkt_last, subport_qmask);\n \n-\tq00_base = rte_sched_port_qbase(port, q00);\n-\tq01_base = rte_sched_port_qbase(port, q01);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);\n+\tq00_base = rte_sched_subport_pipe_qbase(subport00, q00);\n+\tq01_base = rte_sched_subport_pipe_qbase(subport01, q01);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);\n \n-\tr10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);\n-\tr11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);\n+\tr10 = rte_sched_port_enqueue_qwa(port, subport10, q10,\n+\t\t\tq10_base, pkt10);\n+\tr11 = rte_sched_port_enqueue_qwa(port, subport11, q11,\n+\t\t\tq11_base, pkt11);\n \tresult += r10 + r11;\n \n-\tq_last_base = rte_sched_port_qbase(port, q_last);\n-\trte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);\n+\tq_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);\n+\trte_sched_port_enqueue_qwa_prefetch0(port, subport_last,\n+\t\tq_last, q_last_base);\n \n-\tr00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);\n-\tr01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);\n+\tr00 = rte_sched_port_enqueue_qwa(port, subport00, q00,\n+\t\t\tq00_base, pkt00);\n+\tr01 = rte_sched_port_enqueue_qwa(port, subport01, q01,\n+\t\t\tq01_base, pkt01);\n \tresult += r00 + r01;\n \n \tif (n_pkts & 1) {\n-\t\tr_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);\n+\t\tr_last = rte_sched_port_enqueue_qwa(port, subport_last,\n+\t\t\t\t\tq_last,\tq_last_base, pkt_last);\n \t\tresult += r_last;\n \t}\n \n",
    "prefixes": [
        "06/15"
    ]
}
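
Several fields in the body above are links to further resources: "mbox" returns the raw patch, "comments" and "checks" are sub-collections, and each "series" entry carries its own mbox URL. A short follow-on sketch under the same requests assumption as above (the output filename is arbitrary):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/57871/").json()

# Follow the "mbox" link from the body above to download the raw patch;
# the saved file can be applied with "git am patch-57871.mbox".
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("patch-57871.mbox", "wb") as f:
    f.write(mbox.content)

# Each entry in "series" carries its own id, version, and mbox link.
for s in patch["series"]:
    print(s["id"], s["name"], "v" + str(s["version"]))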